var/home/core/zuul-output/logs/kubelet.log

Nov 28 15:24:29 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 15:24:29 crc restorecon[4614]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:24:29 crc restorecon[4614]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc 
restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:24:29 crc 
restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc 
restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc 
restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 
crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 
15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:29 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:24:30 crc restorecon[4614]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:24:30 crc restorecon[4614]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 28 15:24:30 crc kubenswrapper[4647]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.251126 4647 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254816 4647 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254837 4647 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254843 4647 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254848 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254855 4647 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254861 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254870 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254875 4647 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254880 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254884 4647 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254889 4647 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254894 4647 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254898 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254903 4647 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254907 4647 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254911 4647 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254915 4647 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254920 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254924 4647 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254928 4647 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254933 4647 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254937 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254943 4647 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254949 4647 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254954 4647 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254959 4647 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254963 4647 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254968 4647 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254972 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254977 4647 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254981 4647 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254985 4647 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254990 4647 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.254995 4647 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255000 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255004 4647 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255008 4647 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255013 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255018 4647 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255024 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255028 4647 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255032 4647 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255036 4647 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255041 4647 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255045 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255050 4647 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255054 4647 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255059 4647 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255064 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255069 4647 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255075 4647 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255082 4647 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255089 4647 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255094 4647 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255100 4647 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255104 4647 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255108 4647 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255114 4647 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255119 4647 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255125 4647 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255129 4647 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255134 4647 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255139 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255144 4647 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255148 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255153 4647 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255158 4647 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255165 4647 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255170 4647 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255174 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.255180 4647 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255729 4647 flags.go:64] FLAG: --address="0.0.0.0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255746 4647 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255758 4647 flags.go:64] FLAG: --anonymous-auth="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255765 4647 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255773 4647 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255779 4647 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255787 4647 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255794 4647 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255799 4647 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255804 4647 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255810 4647 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255816 4647 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255821 4647 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255826 4647 flags.go:64] FLAG: --cgroup-root="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255832 4647 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255837 4647 flags.go:64] FLAG: --client-ca-file="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255842 4647 flags.go:64] FLAG: --cloud-config="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255846 4647 flags.go:64] FLAG: --cloud-provider="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255852 4647 flags.go:64] FLAG: --cluster-dns="[]" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255860 4647 flags.go:64] FLAG: --cluster-domain="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255865 4647 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255870 4647 flags.go:64] FLAG: --config-dir="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255875 4647 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255881 4647 flags.go:64] FLAG: --container-log-max-files="5" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255888 4647 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255894 4647 flags.go:64] FLAG: 
--container-runtime-endpoint="/var/run/crio/crio.sock" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255899 4647 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255905 4647 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255910 4647 flags.go:64] FLAG: --contention-profiling="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255915 4647 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255921 4647 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255927 4647 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255932 4647 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255939 4647 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255945 4647 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255950 4647 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255955 4647 flags.go:64] FLAG: --enable-load-reader="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255960 4647 flags.go:64] FLAG: --enable-server="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255966 4647 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255973 4647 flags.go:64] FLAG: --event-burst="100" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255979 4647 flags.go:64] FLAG: --event-qps="50" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255984 4647 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255989 4647 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.255994 4647 flags.go:64] FLAG: --eviction-hard="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256000 4647 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256005 4647 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256011 4647 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256016 4647 flags.go:64] FLAG: --eviction-soft="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256021 4647 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256026 4647 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256031 4647 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256036 4647 flags.go:64] FLAG: --experimental-mounter-path="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256041 4647 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256046 4647 flags.go:64] FLAG: --fail-swap-on="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256051 4647 flags.go:64] FLAG: --feature-gates="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256057 4647 
flags.go:64] FLAG: --file-check-frequency="20s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256062 4647 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256067 4647 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256074 4647 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256079 4647 flags.go:64] FLAG: --healthz-port="10248" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256084 4647 flags.go:64] FLAG: --help="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256089 4647 flags.go:64] FLAG: --hostname-override="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256094 4647 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256100 4647 flags.go:64] FLAG: --http-check-frequency="20s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256106 4647 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256111 4647 flags.go:64] FLAG: --image-credential-provider-config="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256115 4647 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256120 4647 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256126 4647 flags.go:64] FLAG: --image-service-endpoint="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256131 4647 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256136 4647 flags.go:64] FLAG: --kube-api-burst="100" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256141 4647 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256147 4647 flags.go:64] FLAG: --kube-api-qps="50" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256152 4647 flags.go:64] FLAG: --kube-reserved="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256157 4647 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256161 4647 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256167 4647 flags.go:64] FLAG: --kubelet-cgroups="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256172 4647 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256177 4647 flags.go:64] FLAG: --lock-file="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256182 4647 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256187 4647 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256192 4647 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256200 4647 flags.go:64] FLAG: --log-json-split-stream="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256205 4647 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256210 4647 flags.go:64] FLAG: --log-text-split-stream="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256214 4647 flags.go:64] FLAG: 
--logging-format="text" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256220 4647 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256226 4647 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256230 4647 flags.go:64] FLAG: --manifest-url="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256235 4647 flags.go:64] FLAG: --manifest-url-header="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256243 4647 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256250 4647 flags.go:64] FLAG: --max-open-files="1000000" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256257 4647 flags.go:64] FLAG: --max-pods="110" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256264 4647 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256271 4647 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256276 4647 flags.go:64] FLAG: --memory-manager-policy="None" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256282 4647 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256287 4647 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256292 4647 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256297 4647 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256309 4647 flags.go:64] FLAG: --node-status-max-images="50" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256314 4647 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256319 4647 flags.go:64] FLAG: --oom-score-adj="-999" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256325 4647 flags.go:64] FLAG: --pod-cidr="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256330 4647 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256339 4647 flags.go:64] FLAG: --pod-manifest-path="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256344 4647 flags.go:64] FLAG: --pod-max-pids="-1" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256349 4647 flags.go:64] FLAG: --pods-per-core="0" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256354 4647 flags.go:64] FLAG: --port="10250" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256359 4647 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256364 4647 flags.go:64] FLAG: --provider-id="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256369 4647 flags.go:64] FLAG: --qos-reserved="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256374 4647 flags.go:64] FLAG: --read-only-port="10255" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256379 4647 flags.go:64] FLAG: --register-node="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256385 4647 flags.go:64] FLAG: 
--register-schedulable="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256389 4647 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256398 4647 flags.go:64] FLAG: --registry-burst="10" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256403 4647 flags.go:64] FLAG: --registry-qps="5" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256424 4647 flags.go:64] FLAG: --reserved-cpus="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256429 4647 flags.go:64] FLAG: --reserved-memory="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256436 4647 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256441 4647 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256446 4647 flags.go:64] FLAG: --rotate-certificates="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256452 4647 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256457 4647 flags.go:64] FLAG: --runonce="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256463 4647 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256469 4647 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256475 4647 flags.go:64] FLAG: --seccomp-default="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256480 4647 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256485 4647 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256491 4647 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256496 4647 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256501 4647 flags.go:64] FLAG: --storage-driver-password="root" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256506 4647 flags.go:64] FLAG: --storage-driver-secure="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256511 4647 flags.go:64] FLAG: --storage-driver-table="stats" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256516 4647 flags.go:64] FLAG: --storage-driver-user="root" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256522 4647 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256527 4647 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256537 4647 flags.go:64] FLAG: --system-cgroups="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256542 4647 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256551 4647 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256557 4647 flags.go:64] FLAG: --tls-cert-file="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256562 4647 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256570 4647 flags.go:64] FLAG: --tls-min-version="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256575 4647 flags.go:64] 
FLAG: --tls-private-key-file="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256580 4647 flags.go:64] FLAG: --topology-manager-policy="none" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256585 4647 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256591 4647 flags.go:64] FLAG: --topology-manager-scope="container" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256596 4647 flags.go:64] FLAG: --v="2" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256604 4647 flags.go:64] FLAG: --version="false" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256610 4647 flags.go:64] FLAG: --vmodule="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256617 4647 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.256622 4647 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256773 4647 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256779 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256784 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256790 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256795 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256799 4647 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256804 4647 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256809 4647 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256814 4647 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256818 4647 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256822 4647 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256826 4647 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256831 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256836 4647 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256841 4647 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256848 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256853 4647 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256862 4647 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256867 4647 feature_gate.go:330] unrecognized 
feature gate: MultiArchInstallAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256872 4647 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256876 4647 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256881 4647 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256885 4647 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256891 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256895 4647 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256900 4647 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256904 4647 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256908 4647 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256913 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256917 4647 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256922 4647 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256926 4647 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256930 4647 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256935 4647 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256939 4647 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256944 4647 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256948 4647 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256952 4647 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256957 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256961 4647 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256965 4647 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256970 4647 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256974 4647 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256978 4647 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256982 4647 feature_gate.go:330] unrecognized 
feature gate: AWSClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256986 4647 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256991 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.256996 4647 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257002 4647 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257011 4647 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257016 4647 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257022 4647 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257027 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257031 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257036 4647 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257040 4647 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257046 4647 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257051 4647 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257056 4647 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257062 4647 feature_gate.go:330] unrecognized feature gate: Example Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257067 4647 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257072 4647 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257077 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257081 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257086 4647 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257091 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257096 4647 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257101 4647 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257105 4647 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257111 4647 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.257117 4647 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.257125 4647 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.265883 4647 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.265949 4647 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266107 4647 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266131 4647 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266142 4647 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266151 4647 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266160 4647 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266168 4647 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266178 4647 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266186 4647 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266194 4647 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266203 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266212 4647 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266220 4647 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266229 4647 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266237 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266245 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266253 4647 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266261 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266270 4647 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266278 4647 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266286 4647 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 
15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266294 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266302 4647 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266310 4647 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266318 4647 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266325 4647 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266333 4647 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266341 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266349 4647 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266357 4647 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266365 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266373 4647 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266382 4647 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266392 4647 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266405 4647 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266447 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266457 4647 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266466 4647 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266475 4647 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266487 4647 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266496 4647 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266504 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266513 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266522 4647 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266531 4647 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266540 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266547 4647 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266555 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266563 4647 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266571 4647 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266579 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266587 4647 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266595 4647 feature_gate.go:330] unrecognized feature gate: Example Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266602 4647 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266610 4647 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266618 4647 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266626 4647 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266634 4647 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266642 4647 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266650 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266658 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266666 4647 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266673 4647 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266681 4647 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266689 4647 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266698 4647 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 15:24:30 crc 
kubenswrapper[4647]: W1128 15:24:30.266708 4647 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266718 4647 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266727 4647 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266735 4647 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266744 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.266754 4647 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.266768 4647 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267014 4647 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267028 4647 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267038 4647 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267047 4647 feature_gate.go:330] unrecognized feature gate: Example Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267056 4647 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267064 4647 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267073 4647 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267081 4647 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267091 4647 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267103 4647 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267111 4647 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267121 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267131 4647 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267142 4647 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267150 4647 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267160 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267169 4647 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267178 4647 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267187 4647 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267195 4647 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267204 4647 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267212 4647 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267220 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267228 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267237 4647 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267246 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267254 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267262 4647 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267271 4647 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267278 4647 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267286 4647 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267295 4647 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267303 4647 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267313 4647 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267325 4647 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267333 4647 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267341 4647 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267349 4647 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267358 4647 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267367 4647 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267374 4647 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267383 4647 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267391 4647 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267402 4647 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267435 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267445 4647 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267456 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267466 4647 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267475 4647 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267484 4647 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267492 4647 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267501 4647 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267509 4647 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267517 4647 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267525 4647 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267533 4647 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267541 4647 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267549 4647 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267557 4647 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 15:24:30 crc 
kubenswrapper[4647]: W1128 15:24:30.267565 4647 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267573 4647 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267582 4647 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267591 4647 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267602 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267611 4647 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267620 4647 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267628 4647 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267637 4647 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267645 4647 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267654 4647 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.267664 4647 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.267677 4647 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.268251 4647 server.go:940] "Client rotation is on, will bootstrap in background" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.272196 4647 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.272353 4647 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.273367 4647 server.go:997] "Starting client certificate rotation"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.273438 4647 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.273813 4647 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-14 12:02:48.636928835 +0000 UTC
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.273912 4647 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1124h38m18.363018926s for next certificate rotation
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.281141 4647 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.283377 4647 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.294580 4647 log.go:25] "Validated CRI v1 runtime API"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.313306 4647 log.go:25] "Validated CRI v1 image API"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.315659 4647 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.317825 4647 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-15-14-37-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.317855 4647 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.327482 4647 manager.go:217] Machine: {Timestamp:2025-11-28 15:24:30.326279625 +0000 UTC m=+0.173886066 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:a49754f3-09f0-421c-a39e-92fe09c4d7bb BootID:aecdc603-c25f-4d6e-8a41-c75b9586cce4 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:50:43:ea Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:50:43:ea Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:59:d4:25 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f0:11:f0 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:7c:51:85 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:5c:e9:a9 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:be:34:da:6d:e2:d8 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:0e:02:4a:ef:07:bc Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.327671 4647 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.327921 4647 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328259 4647 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328427 4647 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328463 4647 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328796 4647 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328813 4647 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328949 4647 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.328969 4647 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329207 4647 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329287 4647 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329909 4647 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329932 4647 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329954 4647 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.329966 4647 kubelet.go:324] "Adding apiserver pod source"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.330006 4647 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.331426 4647 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.331803 4647 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.332553 4647 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.332747 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.332853 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.332748 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333089 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333110 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333118 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333124 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333137 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333145 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333154 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333167 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.333163 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333177 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333219 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333303 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333316 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.333708 4647 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.334485 4647 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.334626 4647 server.go:1280] "Started kubelet"
Nov 28 15:24:30 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.335954 4647 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.336157 4647 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.337089 4647 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.338833 4647 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.339867 4647 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.339964 4647 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 20:07:07.791044502 +0000 UTC
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.340094 4647 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1036h42m37.450955738s for next certificate rotation
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.340272 4647 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.340291 4647 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.340454 4647 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.338651 4647 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.174:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c3509d312bb9e default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:24:30.334589854 +0000 UTC m=+0.182196295,LastTimestamp:2025-11-28 15:24:30.334589854 +0000 UTC m=+0.182196295,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.341144 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="200ms"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.341478 4647 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.341515 4647 factory.go:55] Registering systemd factory
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.341523 4647 factory.go:221] Registration of the systemd container factory successfully
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.341933 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.341997 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.342100 4647 factory.go:153] Registering CRI-O factory
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.342118 4647 factory.go:221] Registration of the crio container factory successfully
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.342161 4647 factory.go:103] Registering Raw factory
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.342175 4647 manager.go:1196] Started watching for new ooms in manager
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.342820 4647 manager.go:319] Starting recovery of all containers
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.344097 4647 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.345456 4647 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.348609 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349110 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349350 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349487 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349569 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349646 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349721 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349798 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349880 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.349953 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350031 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350106 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350183 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350260 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350382 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350478 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350556 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350632 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350710 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.350820 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351502 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351609 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351687 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351792 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351876 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" 
volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.351951 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352033 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352108 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352186 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352277 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352363 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352458 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352541 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352608 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352682 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352763 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352838 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352912 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.352988 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353062 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353215 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353290 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353370 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353484 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353564 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353638 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353724 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353797 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353871 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.353950 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354024 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354100 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354178 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354251 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354327 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354400 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354506 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354581 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354651 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354724 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354806 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354889 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.354967 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355037 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355103 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355172 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355241 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355323 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355394 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355500 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355577 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355647 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355729 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355824 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355897 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.355969 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356040 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356117 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356190 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356260 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356328 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356400 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356495 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356579 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356661 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356743 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.356818 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357083 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357188 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357291 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357382 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357504 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357598 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357714 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357823 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.357932 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.358093 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.358195 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361022 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361077 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361101 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361113 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361124 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361141 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361170 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361187 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361206 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361225 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361238 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361253 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361270 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361282 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361297 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361308 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361322 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361335 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361349 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361360 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361372 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361387 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361398 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361426 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361438 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361449 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361463 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361473 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361489 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361501 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361511 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361524 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361537 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361550 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361561 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361573 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361589 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361589 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361600 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361614 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361624 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361636 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361650 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361662 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.361673 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.362034 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363402 4647 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363441 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363469 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363483 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363499 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363516 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363529 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363547 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363563 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363574 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363588 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363599 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363610 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363623 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363634 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363649 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363663 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363674 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363689 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363701 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363715 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363727 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363738 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363753 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363803 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363819 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363831 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363844 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363858 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363870 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363882 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363893 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363903 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363917 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363927 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363941 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363951 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363961 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363975 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363986 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.363996 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364008 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364019 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364033 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364043 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364054 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364068 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364079 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364094 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364104 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364116 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364129 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364141 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364153 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364164 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364174 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364186 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364196 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364208 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364219 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364229 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364241 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364251 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364264 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364275 4647 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364284 4647 reconstruct.go:97] "Volume reconstruction finished"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.364291 4647 reconciler.go:26] "Reconciler: start to sync state"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.360203 4647 manager.go:324] Recovery completed
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.380694 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.382398 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.382454 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.382464 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.384951 4647 cpu_manager.go:225] "Starting CPU manager" policy="none"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.384967 4647 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.384985 4647 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.390546 4647 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.392342 4647 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.392384 4647 status_manager.go:217] "Starting to sync pod status with apiserver"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.393066 4647 kubelet.go:2335] "Starting kubelet main sync loop"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.393113 4647 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Nov 28 15:24:30 crc kubenswrapper[4647]: W1128 15:24:30.393276 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.393333 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.421764 4647 policy_none.go:49] "None policy: Start"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.423718 4647 memory_manager.go:170] "Starting memorymanager" policy="None"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.423743 4647 state_mem.go:35] "Initializing new in-memory state store"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.445162 4647 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.493505 4647 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515207 4647 manager.go:334] "Starting Device Plugin manager"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515249 4647 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515260 4647 server.go:79] "Starting device plugin registration server"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515691 4647 eviction_manager.go:189] "Eviction manager: starting control loop"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515782 4647 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.515949 4647 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.516359 4647 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.516393 4647 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.526846 4647 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.541622 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="400ms"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.616968 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.618090 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.618141 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.618152 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.618182 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.618804 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.174:6443: connect: connection refused" node="crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.693676 4647 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"]
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.693832 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695160 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695198 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695357 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695622 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.695704 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696339 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696357 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696365 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696450 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696590 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696625 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.696994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697029 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697043 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697042 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697068 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697077 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697142 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697241 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697284 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697842 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697870 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697880 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697842 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697953 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.697964 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698043 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698155 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698210 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698292 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698300 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698714 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698738 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698746 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698934 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.698961 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699070 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699090 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699099 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699514 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699542 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.699561 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.771809 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.771851 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.771876 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.771950 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.771967 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772055 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772249 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772344 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772468 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772515 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772704 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772740 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772789 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772818 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.772849 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.819060 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.820387 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.820479 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.820501 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.820562 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.821261 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.174:6443: connect: connection refused" node="crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874154 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874238 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874300 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874341 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874382 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874394 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874462 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874479 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874508 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874510 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874557 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874622 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874599 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874592 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874589 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874700 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874728 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874738 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874757 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874789 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874806 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874849 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874872 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874889 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874935 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874981 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.875002 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.874936 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.875032 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: I1128 15:24:30.875172 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:30 crc kubenswrapper[4647]: E1128 15:24:30.942489 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="800ms"
Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.026487 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.033622 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.049872 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.051838 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-0f1cb2729d45a527dd8f149db255c87ac1929b333246eb4c59a58a8d3a5eab2c WatchSource:0}: Error finding container 0f1cb2729d45a527dd8f149db255c87ac1929b333246eb4c59a58a8d3a5eab2c: Status 404 returned error can't find the container with id 0f1cb2729d45a527dd8f149db255c87ac1929b333246eb4c59a58a8d3a5eab2c
Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.055251 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-b8aafcb953fbe619f0cf200ced7499660c9115c29030b5896896f972644a4517 WatchSource:0}: Error finding container b8aafcb953fbe619f0cf200ced7499660c9115c29030b5896896f972644a4517: Status 404 returned error can't find the container with id b8aafcb953fbe619f0cf200ced7499660c9115c29030b5896896f972644a4517
Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.067940 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-c716f646ce014a37ec05b6a87b3c2689901d761f3104324a0a8b19d1620fd9f7 WatchSource:0}: Error finding container c716f646ce014a37ec05b6a87b3c2689901d761f3104324a0a8b19d1620fd9f7: Status 404 returned error can't find the container with id c716f646ce014a37ec05b6a87b3c2689901d761f3104324a0a8b19d1620fd9f7
Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.073137 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.081355 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.091987 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-e14aece9c1b958498c316985bb66899ac66eaeb7998f46cb700253498cdba2a9 WatchSource:0}: Error finding container e14aece9c1b958498c316985bb66899ac66eaeb7998f46cb700253498cdba2a9: Status 404 returned error can't find the container with id e14aece9c1b958498c316985bb66899ac66eaeb7998f46cb700253498cdba2a9 Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.099326 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e34eb4f8c8ef9691fb9d6a5a1ed8a0f6fbec808907b4d985428cfb09a66d5083 WatchSource:0}: Error finding container e34eb4f8c8ef9691fb9d6a5a1ed8a0f6fbec808907b4d985428cfb09a66d5083: Status 404 returned error can't find the container with id e34eb4f8c8ef9691fb9d6a5a1ed8a0f6fbec808907b4d985428cfb09a66d5083 Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.221557 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.222981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.223046 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.223057 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.223110 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.223853 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.174:6443: connect: connection refused" node="crc" Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.248834 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.248967 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.336018 4647 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.399403 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e34eb4f8c8ef9691fb9d6a5a1ed8a0f6fbec808907b4d985428cfb09a66d5083"} Nov 28 15:24:31 
crc kubenswrapper[4647]: I1128 15:24:31.401598 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e14aece9c1b958498c316985bb66899ac66eaeb7998f46cb700253498cdba2a9"} Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.403924 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c716f646ce014a37ec05b6a87b3c2689901d761f3104324a0a8b19d1620fd9f7"} Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.405488 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0f1cb2729d45a527dd8f149db255c87ac1929b333246eb4c59a58a8d3a5eab2c"} Nov 28 15:24:31 crc kubenswrapper[4647]: I1128 15:24:31.406695 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b8aafcb953fbe619f0cf200ced7499660c9115c29030b5896896f972644a4517"} Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.415900 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.416057 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.645903 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.646012 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.743696 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="1.6s" Nov 28 15:24:31 crc kubenswrapper[4647]: W1128 15:24:31.803825 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:31 crc kubenswrapper[4647]: E1128 15:24:31.803912 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: 
Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.024504 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.032287 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.032365 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.032385 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.032467 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:24:32 crc kubenswrapper[4647]: E1128 15:24:32.033148 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.174:6443: connect: connection refused" node="crc" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.336292 4647 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.412506 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410" exitCode=0 Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.412601 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410"} Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.412767 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.414370 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.414408 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.414449 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.416115 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.419902 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.419959 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.419972 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.422938 4647 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="81caabfdaec82723b8ce4ee38f19077b584906e5cbb590d3bbaeee44d77dd054" exitCode=0 Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.423027 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"81caabfdaec82723b8ce4ee38f19077b584906e5cbb590d3bbaeee44d77dd054"} Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.423978 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.425488 4647 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="2413f61d4e2dc7da0a0370c1ff166916d09bc91d4fb0ea8087cc10285ce8b6e5" exitCode=0 Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.425562 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"2413f61d4e2dc7da0a0370c1ff166916d09bc91d4fb0ea8087cc10285ce8b6e5"} Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.425651 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.426839 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.426871 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.426887 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.426955 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.426999 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.427051 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.429452 4647 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803" exitCode=0 Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.429581 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.429567 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803"} Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.430555 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.430593 4647 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.430609 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.435081 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720"} Nov 28 15:24:32 crc kubenswrapper[4647]: I1128 15:24:32.435125 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378"} Nov 28 15:24:33 crc kubenswrapper[4647]: E1128 15:24:33.153927 4647 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.174:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c3509d312bb9e default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:24:30.334589854 +0000 UTC m=+0.182196295,LastTimestamp:2025-11-28 15:24:30.334589854 +0000 UTC m=+0.182196295,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 15:24:33 crc kubenswrapper[4647]: W1128 15:24:33.162495 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:33 crc kubenswrapper[4647]: E1128 15:24:33.162626 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.335669 4647 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:33 crc kubenswrapper[4647]: E1128 15:24:33.344957 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="3.2s" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.442598 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825"} Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.633985 4647 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.636051 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.636529 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.636693 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:33 crc kubenswrapper[4647]: I1128 15:24:33.636939 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:24:33 crc kubenswrapper[4647]: E1128 15:24:33.637710 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.174:6443: connect: connection refused" node="crc" Nov 28 15:24:33 crc kubenswrapper[4647]: W1128 15:24:33.754273 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:24:33 crc kubenswrapper[4647]: E1128 15:24:33.754439 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.174:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.448347 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ff90385e25edea709de69c044537f706e0c1e4cad98a7ae55b577a9104f79e6f"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.448467 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.449524 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.449563 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.449577 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.452364 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.452423 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.452435 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.452520 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.453125 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.453156 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.453170 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.455278 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.455448 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.456424 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.456454 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.456466 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.459121 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.459145 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.459158 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.459168 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.461553 4647 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2ddf40314e95ad5216f75c3bc1df4a668417c48792aece0c27a003e8347576cf" exitCode=0 Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.461610 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2ddf40314e95ad5216f75c3bc1df4a668417c48792aece0c27a003e8347576cf"} Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.461706 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.462516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.462555 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:34 crc kubenswrapper[4647]: I1128 15:24:34.462566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.471827 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24"} Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.471889 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.473620 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.473711 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.473737 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.476744 4647 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b148d7a9a39458095947e6d65d770b19243421de6e9383095ded1703018b069a" exitCode=0 Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.476897 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.476943 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.476983 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b148d7a9a39458095947e6d65d770b19243421de6e9383095ded1703018b069a"} Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.477047 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.477016 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.477528 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478863 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478872 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:35 crc kubenswrapper[4647]: 
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478953 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478902 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479010 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478980 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479079 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479096 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479040 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479146 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.479157 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:35 crc kubenswrapper[4647]: I1128 15:24:35.478981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.188942 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.198366 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484619 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6df182c2cf4ed05c0b48e48ac514987a133ab4252b0699d6f25dd3c81992b13a"}
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484691 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7f6d76d68cb88053d117ce151dcff87171ad0b1757334e4e69986ac81301e2df"}
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484711 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"eeae41e209c03a7b7154c064cd6238a3ae4f4f9b91204f355bab8cb49c322793"}
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484781 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484843 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484876 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.484908 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486404 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486458 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486492 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486455 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486588 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.486603 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.838125 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.839255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.839328 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.839349 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.839405 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:24:36 crc kubenswrapper[4647]: I1128 15:24:36.982852 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.216294 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495052 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a17ce56c81f992b4217a104bca435e5aed254f83fe91c74cf838ee9057c4503c"}
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495818 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"eafb3cd99986cf0e02dcfe091393dcfa21e001d82665695bab283e7d1925651d"}
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495326 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495137 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495387 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.495973 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.497901 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.497900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.497974 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498000 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498003 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498098 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498126 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498129 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:37 crc kubenswrapper[4647]: I1128 15:24:37.498147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.497974 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.498181 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.499335 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.499402 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.499464 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.500111 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.500149 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.500158 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.641303 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.641579 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.643151 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.643195 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.643207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:38 crc kubenswrapper[4647]: I1128 15:24:38.808340 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:39 crc kubenswrapper[4647]: I1128 15:24:39.501529 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:39 crc kubenswrapper[4647]: I1128 15:24:39.503050 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:39 crc kubenswrapper[4647]: I1128 15:24:39.503114 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:39 crc kubenswrapper[4647]: I1128 15:24:39.503133 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.045071 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.045503 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.045573 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.047207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.047264 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.047283 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.169944 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.216225 4647 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.216400 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.506020 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.507861 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.507941 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.508126 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:40 crc kubenswrapper[4647]: E1128 15:24:40.527011 4647 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.867595 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.867925 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.870089 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.870150 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:40 crc kubenswrapper[4647]: I1128 15:24:40.870169 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:42 crc kubenswrapper[4647]: I1128 15:24:42.728522 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:24:42 crc kubenswrapper[4647]: I1128 15:24:42.728862 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:42 crc kubenswrapper[4647]: I1128 15:24:42.730566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:42 crc kubenswrapper[4647]: I1128 15:24:42.730620 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:42 crc kubenswrapper[4647]: I1128 15:24:42.730630 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:44 crc kubenswrapper[4647]: I1128 15:24:44.336528 4647 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 28 15:24:44 crc kubenswrapper[4647]: W1128 15:24:44.436053 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 15:24:44 crc kubenswrapper[4647]: I1128 15:24:44.436205 4647 trace.go:236] Trace[828515000]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:24:34.434) (total time: 10001ms):
Nov 28 15:24:44 crc kubenswrapper[4647]: Trace[828515000]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (15:24:44.436)
Nov 28 15:24:44 crc kubenswrapper[4647]: Trace[828515000]: [10.001707939s] [10.001707939s] END
Nov 28 15:24:44 crc kubenswrapper[4647]: E1128 15:24:44.436240 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 15:24:44 crc kubenswrapper[4647]: W1128 15:24:44.983567 4647 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 15:24:44 crc kubenswrapper[4647]: I1128 15:24:44.983690 4647 trace.go:236] Trace[2034887600]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:24:34.982) (total time: 10001ms):
Nov 28 15:24:44 crc kubenswrapper[4647]: Trace[2034887600]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (15:24:44.983)
Nov 28 15:24:44 crc kubenswrapper[4647]: Trace[2034887600]: [10.001348113s] [10.001348113s] END
Nov 28 15:24:44 crc kubenswrapper[4647]: E1128 15:24:44.983721 4647 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 15:24:45 crc kubenswrapper[4647]: I1128 15:24:45.075946 4647 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 15:24:45 crc kubenswrapper[4647]: I1128 15:24:45.076077 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 15:24:45 crc kubenswrapper[4647]: I1128 15:24:45.088046 4647 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 15:24:45 crc kubenswrapper[4647]: I1128 15:24:45.088120 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.320594 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.320847 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.322087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.322126 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.322140 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.350440 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.522943 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.524226 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.524291 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.524317 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:46 crc kubenswrapper[4647]: I1128 15:24:46.544127 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 28 15:24:47 crc kubenswrapper[4647]: I1128 15:24:47.525748 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:24:47 crc kubenswrapper[4647]: I1128 15:24:47.527733 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:47 crc kubenswrapper[4647]: I1128 15:24:47.527803 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:47 crc kubenswrapper[4647]: I1128 15:24:47.527826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:49 crc kubenswrapper[4647]: I1128 15:24:49.733905 4647 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 28 15:24:49 crc kubenswrapper[4647]: I1128 15:24:49.779969 4647 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.052093 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.057691 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.072880 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.074322 4647 trace.go:236] Trace[758833650]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:24:40.003) (total time: 10070ms):
Nov 28 15:24:50 crc kubenswrapper[4647]: Trace[758833650]: ---"Objects listed" error: 10070ms (15:24:50.074)
Nov 28 15:24:50 crc kubenswrapper[4647]: Trace[758833650]: [10.070900368s] [10.070900368s] END
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.074358 4647 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.074711 4647 trace.go:236] Trace[337015560]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:24:38.966) (total time: 11108ms):
Nov 28 15:24:50 crc kubenswrapper[4647]: Trace[337015560]: ---"Objects listed" error: 11108ms (15:24:50.074)
Nov 28 15:24:50 crc kubenswrapper[4647]: Trace[337015560]: [11.108314945s] [11.108314945s] END
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.074742 4647 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.076909 4647 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.077093 4647 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.217036 4647 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.217133 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.342286 4647 apiserver.go:52] "Watching apiserver"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.462881 4647 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.463256 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"]
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.463936 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.464010 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.464078 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.464377 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.464413 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.464663 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.464740 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.464803 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.465008 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.469836 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.471527 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.472957 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.473194 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.473318 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.473346 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.474685 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.475372 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.475493 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.496751 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.509235 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.520195 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.539068 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.539215 4647 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.544269 4647 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593100 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593149 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593616 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593742 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593938 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593968 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.593989 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594021 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594042 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594061 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594115 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594141 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594166 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594191 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594213 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594238 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594263 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594288 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594315 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594343 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594367 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594389 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594415 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594454 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594481 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594504 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594527 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594552 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594576 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594599 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594621 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594646 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594666 4647 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594689 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594710 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594734 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594760 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594781 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594802 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594918 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594946 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594973 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.594997 4647 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595025 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595072 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595097 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595123 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595146 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595173 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595196 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595220 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595242 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 
15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595263 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595283 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595305 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595327 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595351 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595345 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595374 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595403 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595433 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595478 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595502 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595524 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595571 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595597 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595621 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595692 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" 
(OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595733 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595795 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595822 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595847 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595871 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595900 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595946 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595971 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.595996 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596023 4647 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596050 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596073 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596095 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596118 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596140 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596166 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596192 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596222 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596251 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 15:24:50 crc 
kubenswrapper[4647]: I1128 15:24:50.596277 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596302 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596325 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596350 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596376 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596399 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596430 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596472 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596495 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596521 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596545 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596568 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596597 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596623 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596647 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596671 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596696 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596723 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596750 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596779 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596804 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596825 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596853 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596876 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596898 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596922 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596945 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596967 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596991 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597015 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597054 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597078 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597102 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597125 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597149 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597173 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597198 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597225 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597250 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597275 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597301 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597324 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597348 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597371 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597394 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597421 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597460 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597485 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597510 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597535 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597557 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597581 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597606 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597632 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597663 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597689 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597712 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597739 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597764 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597786 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597810 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597834 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597856 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597879 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597905 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597930 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597957 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597984 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598008 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598035 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598064 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598086 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598109 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.598133 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638487 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639643 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639740 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639771 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639791 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639812 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639835 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639855 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639876 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639899 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639920 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639939 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639957 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639975 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639992 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640014 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640033 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640052 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640075 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640099 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640116 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640136 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640156 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640175 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640193 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640214 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640237 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640256 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640274 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640294 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640313 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640341 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640383 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640415 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640434 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640466 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640485 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640531 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640562 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640583 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640606 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640633 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640653 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640676 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640696 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640716 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640736 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640761 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640781 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640805 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640832 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640898 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640911 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653842 4647 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596191 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.660091 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596496 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.596566 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.597311 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637085 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637149 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637175 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637263 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637200 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637277 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637353 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637400 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637455 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637514 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637753 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637773 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.637845 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638063 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638091 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638094 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638266 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638290 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638338 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638467 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638481 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638501 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638504 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638640 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638854 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.638958 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639133 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639159 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639213 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639224 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639442 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639593 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639695 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639768 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639845 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.639993 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640019 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640168 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640346 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640465 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640519 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.640650 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.641634 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.641690 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.641894 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642151 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642325 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642502 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642589 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642705 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642921 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.642975 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.643662 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.643908 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.650642 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.650720 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.651085 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.651259 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.651443 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.651591 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.651944 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.652726 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.652790 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.652931 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653058 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653174 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653245 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653384 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653689 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653880 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.653961 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.654164 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.654315 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.654422 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.654694 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.654954 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.655002 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.655218 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.655366 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.655539 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.655846 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656100 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656136 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656153 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656267 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656486 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656614 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656811 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.656907 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657075 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657139 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657346 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657356 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657532 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657708 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.657893 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658153 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658547 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658766 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658824 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658843 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658888 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.658969 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659017 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.659100 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:24:51.159082095 +0000 UTC m=+21.006688566 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659105 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659302 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659540 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659724 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659807 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.659972 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.660605 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.661195 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.661338 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.661467 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.662072 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.662357 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.662381 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:51.162360357 +0000 UTC m=+21.009966778 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.662533 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.662782 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.662911 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663100 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663141 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663526 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663569 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663694 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663756 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663787 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663803 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.663982 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.664175 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.664233 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.664274 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:51.16426374 +0000 UTC m=+21.011870161 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.664387 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.665577 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.665744 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.666493 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.666873 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.666916 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.667029 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.667413 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.667690 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.668785 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.668975 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.669192 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.669258 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.669332 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.669886 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.670141 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.670547 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.670951 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671102 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671240 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671386 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671586 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671622 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.671659 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.672725 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.672761 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.677748 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.678197 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.678223 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.678236 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.678285 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:51.178272152 +0000 UTC m=+21.025878573 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.680212 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.680243 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.680254 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:50 crc kubenswrapper[4647]: E1128 15:24:50.680283 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:51.180274218 +0000 UTC m=+21.027880639 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.683712 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.683910 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.685987 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
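
[editor's note] The nestedpendingoperations entries above show the retry gate on failed volume operations: after a failure the operation is blocked until now+delay ("No retries permitted until ... (durationBeforeRetry 500ms)"), and the delay grows on subsequent failures up to a cap. A minimal sketch of that gating, assuming illustrative constants (only the initial 500ms is taken from the log; the cap and doubling policy here are not kubelet's exact values):

package main

import (
	"fmt"
	"time"
)

// backoff tracks when a failed operation may next be retried.
type backoff struct {
	delay    time.Duration
	maxDelay time.Duration
	until    time.Time
}

// fail records a failure and pushes the next permitted retry further out.
func (b *backoff) fail(now time.Time) {
	switch {
	case b.delay == 0:
		b.delay = 500 * time.Millisecond // matches the 500ms seen in the log
	case b.delay*2 > b.maxDelay:
		b.delay = b.maxDelay
	default:
		b.delay *= 2
	}
	b.until = now.Add(b.delay)
}

// allowed reports whether a retry is permitted at time now.
func (b *backoff) allowed(now time.Time) bool { return !now.Before(b.until) }

func main() {
	b := &backoff{maxDelay: 2 * time.Minute}
	now := time.Now()
	for i := 0; i < 4; i++ {
		b.fail(now)
		fmt.Printf("no retries permitted until %s (durationBeforeRetry %s)\n",
			b.until.Format(time.RFC3339Nano), b.delay)
		now = b.until // pretend the next attempt happens exactly at the deadline and fails again
	}
}
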
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.696649 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.696711 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.696834 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.696871 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.697626 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.697956 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.698060 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.698161 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.698820 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
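
[editor's note] Every "Failed to update status for pod" entry in this window shares one root cause: the apiserver must call the validating webhook pod.network-node-identity.openshift.io at https://127.0.0.1:9743 before admitting the status patch, and nothing is listening there yet. The network-node-identity-vrzqb pod that serves that endpoint is itself still being remounted a few lines earlier (webhook-cert, kube-api-access-s2kz5), which is likely the usual bootstrap ordering loop after a CRC restart. A quick stand-alone connectivity probe for that endpoint (a diagnostic sketch, not part of OpenShift tooling; it only checks that something accepts TCP on the port):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// Same failure mode as the log: dial tcp 127.0.0.1:9743: connect: connection refused.
		fmt.Println("webhook endpoint unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting TCP connections")
}
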
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.699342 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.699822 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.699822 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.699938 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.700675 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.701152 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.701950 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.702386 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.702986 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.704030 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.704536 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). 
InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.704838 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.705830 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.707168 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.709822 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.710587 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.711263 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.711588 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.711708 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.712280 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.713216 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.714607 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.715156 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.716007 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.716330 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.718592 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.722715 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.724147 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.733200 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.741195 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.741935 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.741971 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742070 4647 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742122 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742138 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742151 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742165 4647 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742178 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742191 4647 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742203 4647 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742215 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742229 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742243 4647 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742258 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742273 4647 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742284 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742300 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742312 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742326 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742340 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742355 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742367 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742378 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742389 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742400 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742416 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742428 4647 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742457 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742470 4647 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742485 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742499 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742512 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742525 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742538 4647 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742549 4647 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742562 4647 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742575 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742587 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742599 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742610 4647 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742622 4647 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742634 4647 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742648 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742659 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742671 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742685 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742697 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742713 4647 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742726 4647 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742739 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742752 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742764 4647 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742777 4647 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742790 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742803 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742814 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742827 4647 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742840 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742852 4647 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742864 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742876 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742887 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742897 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742907 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742916 4647 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742930 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742939 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742949 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742958 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742967 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742977 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742986 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.742996 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743007 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743016 4647 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743025 4647 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743035 4647 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" 
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743044 4647 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743052 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743061 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743071 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743080 4647 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743090 4647 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743099 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743108 4647 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743119 4647 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743127 4647 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743136 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743146 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743154 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743162 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743172 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743181 4647 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743190 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743198 4647 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743207 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743217 4647 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743226 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743236 4647 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743244 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743253 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743264 4647 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743273 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743282 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743293 4647 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743305 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743316 4647 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743326 4647 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743336 4647 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743363 4647 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743373 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743384 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743393 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743404 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743418 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.743428 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744729 4647 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744745 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744756 4647 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744766 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744776 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744786 4647 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744795 4647 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744805 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744816 4647 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744825 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744834 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744843 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744853 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744867 4647 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744877 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744888 4647 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744898 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744910 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744921 4647 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744930 4647 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744939 4647 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744948 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744957 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744965 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744975 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744984 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744995 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745004 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745015 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745025 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745033 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745043 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745051 4647 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745061 4647 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745070 4647 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745079 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745088 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745097 4647 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745107 4647 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745116 4647 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745125 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745133 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745144 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745154 4647 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745164 4647 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745174 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745185 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745196 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745208 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745220 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745232 4647 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745243 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745253 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745264 4647 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745274 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745284 4647 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745297 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745311 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745323 4647 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745335 4647 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745347 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745360 4647 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745371 4647 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745382 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745395 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745412 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745425 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745454 4647 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745467 4647 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745480 4647 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745490 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745500 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745510 4647 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745520 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.745530 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744696 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.744651 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.750508 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.761536 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.772293 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.776650 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.781620 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.782896 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.790189 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.792401 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: W1128 15:24:50.802587 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-7d814d0ba275dfb5bff200faa3400a598a5a653f3431a26dba3a1662bb75d789 WatchSource:0}: Error finding container 7d814d0ba275dfb5bff200faa3400a598a5a653f3431a26dba3a1662bb75d789: Status 404 returned error can't find the container with id 7d814d0ba275dfb5bff200faa3400a598a5a653f3431a26dba3a1662bb75d789
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.807279 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.817418 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.827547 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.885033 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.887248 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.946717 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.946756 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:50 crc kubenswrapper[4647]: I1128 15:24:50.963633 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.047707 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.249216 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.249292 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.249322 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.249344 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.249367 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249484 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249538 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:52.249523167 +0000 UTC m=+22.097129588 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249593 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:24:52.249585749 +0000 UTC m=+22.097192170 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249626 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249650 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:52.249642651 +0000 UTC m=+22.097249072 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249708 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249721 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249732 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249758 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:52.249748824 +0000 UTC m=+22.097355245 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249803 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249812 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249819 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:51 crc kubenswrapper[4647]: E1128 15:24:51.249839 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:52.249833126 +0000 UTC m=+22.097439547 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.536470 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734"} Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.536535 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9b1d3d69fc204ee67a80df6f4d66e0089f2f57b7e6e3af00c599602f22fbbaa2"} Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.537845 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4"} Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.537890 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c67c06d4644f22de39aabe63b27d0f7b89f228dd9bcad45f35d0376e2f6b8201"} Nov 28 15:24:51 crc kubenswrapper[4647]: I1128 15:24:51.538864 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7d814d0ba275dfb5bff200faa3400a598a5a653f3431a26dba3a1662bb75d789"} Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.258567 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.258654 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.258678 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.258697 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.258715 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.258808 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.258856 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.258842618 +0000 UTC m=+24.106449039 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259160 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.259151537 +0000 UTC m=+24.106757958 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259192 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259213 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.259208438 +0000 UTC m=+24.106814859 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259262 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259271 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259281 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259302 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.259294681 +0000 UTC m=+24.106901102 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259339 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259349 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259356 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.259375 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.259369843 +0000 UTC m=+24.106976264 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.395369 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.395493 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.395546 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.395585 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.395617 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.395656 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.400560 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.401054 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.402287 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.403185 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.404628 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.405336 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.406041 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.407263 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.407919 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.413046 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.413561 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.414875 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.415407 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.416076 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.417031 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.417539 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.418540 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.418894 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.419479 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.420466 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.420887 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.421851 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.422811 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.424491 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.425009 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.425873 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.427189 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.427915 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.429002 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.429640 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.430600 4647 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.430713 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.432459 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.433353 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.433880 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.435882 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.436585 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.437532 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.438129 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.439161 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.439709 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.440724 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.441371 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.442314 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.442773 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.443659 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.444157 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.446237 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.446726 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.447651 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.448131 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.449152 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.450009 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.450703 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.512154 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-k7msq"] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.512427 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.516214 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.519470 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.519545 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.519823 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-4mdqn"] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.520255 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.520492 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-7mwt4"] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.520857 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.525856 4647 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.525961 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.530234 4647 reflector.go:561] object-"openshift-machine-config-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.530330 4647 reflector.go:561] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.530348 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to 
list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.530401 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.530244 4647 reflector.go:561] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": failed to list *v1.Secret: secrets "machine-config-daemon-dockercfg-r5tcq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.530632 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-r5tcq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-config-daemon-dockercfg-r5tcq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.530278 4647 reflector.go:561] object-"openshift-machine-config-operator"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:24:52 crc kubenswrapper[4647]: E1128 15:24:52.530733 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.531464 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-l5xdk"] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.532369 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.534251 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-c76pb"] Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.535472 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.538948 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.539059 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.541367 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.542479 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf"} Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.543596 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.545618 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.545818 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.546010 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.546239 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.546448 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.546570 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.546614 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.560402 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.560648 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.560692 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561056 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-etc-kubernetes\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561086 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/008f163b-b2fe-4238-90b5-96f0d89f3fb5-proxy-tls\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561108 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561128 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-daemon-config\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561163 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpkrt\" (UniqueName: \"kubernetes.io/projected/8fe12df9-7deb-4f76-91cf-5b6b138d7675-kube-api-access-wpkrt\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561227 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561245 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cni-binary-copy\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561285 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsvwn\" (UniqueName: \"kubernetes.io/projected/02426497-e7d0-4982-a129-e5715ad55cd1-kube-api-access-rsvwn\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561334 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561367 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-conf-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561385 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvh5c\" (UniqueName: \"kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561409 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-system-cni-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561445 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-tuning-conf-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561459 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561497 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/008f163b-b2fe-4238-90b5-96f0d89f3fb5-rootfs\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561514 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561529 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-k8s-cni-cncf-io\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561543 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-bin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561559 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-hostroot\") pod \"multus-4mdqn\" (UID: 
\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561575 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561589 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561603 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-socket-dir-parent\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561627 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-multus\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561665 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561681 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cnibin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561695 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-os-release\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561710 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/02426497-e7d0-4982-a129-e5715ad55cd1-hosts-file\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561729 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: 
\"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561758 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561775 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8229\" (UniqueName: \"kubernetes.io/projected/817b3066-f5d4-49c6-a9b0-f621261d5f81-kube-api-access-q8229\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561809 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-os-release\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561825 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-system-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561843 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561857 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561872 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561889 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561904 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561921 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/008f163b-b2fe-4238-90b5-96f0d89f3fb5-mcd-auth-proxy-config\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561939 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-cnibin\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561955 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561973 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.561989 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-multus-certs\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562013 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562031 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562045 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-netns\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562060 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-kubelet\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562077 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whrxj\" (UniqueName: \"kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562101 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-binary-copy\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562115 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.562129 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.563208 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.587708 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.602004 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.616640 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662462 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662534 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/008f163b-b2fe-4238-90b5-96f0d89f3fb5-rootfs\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662574 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvh5c\" (UniqueName: \"kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662594 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-system-cni-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662614 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-tuning-conf-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662631 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662645 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662660 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: 
\"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-k8s-cni-cncf-io\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662667 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-system-cni-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662675 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-bin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662690 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-hostroot\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662705 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662720 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662740 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-socket-dir-parent\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662762 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-multus\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662779 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/02426497-e7d0-4982-a129-e5715ad55cd1-hosts-file\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662795 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: 
\"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662810 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662825 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cnibin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662838 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-os-release\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662857 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662877 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-os-release\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662896 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8229\" (UniqueName: \"kubernetes.io/projected/817b3066-f5d4-49c6-a9b0-f621261d5f81-kube-api-access-q8229\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662913 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662645 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/008f163b-b2fe-4238-90b5-96f0d89f3fb5-rootfs\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662928 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: 
I1128 15:24:52.662955 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-system-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662961 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662970 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/008f163b-b2fe-4238-90b5-96f0d89f3fb5-mcd-auth-proxy-config\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662987 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-cnibin\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663004 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663021 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663034 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-k8s-cni-cncf-io\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663037 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663098 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663115 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663131 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663146 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-multus-certs\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663162 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663182 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-netns\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663199 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-kubelet\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662858 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663210 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/02426497-e7d0-4982-a129-e5715ad55cd1-hosts-file\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663218 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-binary-copy\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663232 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663258 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663261 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663282 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-bin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663286 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whrxj\" (UniqueName: \"kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663297 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663303 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-hostroot\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663305 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-cni-multus\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663306 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/008f163b-b2fe-4238-90b5-96f0d89f3fb5-proxy-tls\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663323 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663337 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663339 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-cnibin\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663353 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-daemon-config\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663344 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663373 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cnibin\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663368 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-etc-kubernetes\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663388 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-etc-kubernetes\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663401 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpkrt\" (UniqueName: \"kubernetes.io/projected/8fe12df9-7deb-4f76-91cf-5b6b138d7675-kube-api-access-wpkrt\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.662988 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663423 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663460 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cni-binary-copy\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663475 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsvwn\" (UniqueName: \"kubernetes.io/projected/02426497-e7d0-4982-a129-e5715ad55cd1-kube-api-access-rsvwn\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663490 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663505 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-conf-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663552 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-conf-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663642 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663414 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-os-release\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663711 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663750 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn\") pod 
\"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663783 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/817b3066-f5d4-49c6-a9b0-f621261d5f81-tuning-conf-dir\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663794 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-system-cni-dir\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663406 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663289 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-socket-dir-parent\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663939 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663181 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-os-release\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663990 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664013 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-multus-certs\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664020 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.663200 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664063 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664068 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664096 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-run-netns\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664144 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe12df9-7deb-4f76-91cf-5b6b138d7675-host-var-lib-kubelet\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664256 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664594 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664850 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-cni-binary-copy\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664887 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8fe12df9-7deb-4f76-91cf-5b6b138d7675-multus-daemon-config\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664927 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-binary-copy\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.664994 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/817b3066-f5d4-49c6-a9b0-f621261d5f81-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.669960 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.698068 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whrxj\" (UniqueName: \"kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj\") pod \"ovnkube-node-c76pb\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") " pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.701050 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsvwn\" (UniqueName: \"kubernetes.io/projected/02426497-e7d0-4982-a129-e5715ad55cd1-kube-api-access-rsvwn\") pod \"node-resolver-k7msq\" (UID: \"02426497-e7d0-4982-a129-e5715ad55cd1\") " pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.701810 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8229\" (UniqueName: \"kubernetes.io/projected/817b3066-f5d4-49c6-a9b0-f621261d5f81-kube-api-access-q8229\") pod \"multus-additional-cni-plugins-l5xdk\" (UID: \"817b3066-f5d4-49c6-a9b0-f621261d5f81\") " pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.703197 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.706650 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpkrt\" (UniqueName: \"kubernetes.io/projected/8fe12df9-7deb-4f76-91cf-5b6b138d7675-kube-api-access-wpkrt\") pod \"multus-4mdqn\" (UID: \"8fe12df9-7deb-4f76-91cf-5b6b138d7675\") " pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.727750 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.749225 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.773371 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.804310 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.823295 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.828972 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-k7msq" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.838544 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-4mdqn" Nov 28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.842205 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02426497_e7d0_4982_a129_e5715ad55cd1.slice/crio-3f8fc9a64e7dbce67c532cb0391fae47fa2499555005333497b3bd96659e279a WatchSource:0}: Error finding container 3f8fc9a64e7dbce67c532cb0391fae47fa2499555005333497b3bd96659e279a: Status 404 returned error can't find the container with id 3f8fc9a64e7dbce67c532cb0391fae47fa2499555005333497b3bd96659e279a Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.846742 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.856006 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.861023 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.863360 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.886567 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 
28 15:24:52 crc kubenswrapper[4647]: W1128 15:24:52.899524 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde25f5ba_91da_4a77_8747_ec3a56a141df.slice/crio-fc355af2b5023259bff2abf7c649dbb01de73ec932f8f0f10ca65ad1e7140ce4 WatchSource:0}: Error finding container fc355af2b5023259bff2abf7c649dbb01de73ec932f8f0f10ca65ad1e7140ce4: Status 404 returned error can't find the container with id fc355af2b5023259bff2abf7c649dbb01de73ec932f8f0f10ca65ad1e7140ce4 Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.903009 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.920048 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.936435 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.949897 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.968686 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:52 crc kubenswrapper[4647]: I1128 15:24:52.985749 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.365830 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.375510 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.390853 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/008f163b-b2fe-4238-90b5-96f0d89f3fb5-proxy-tls\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:53 crc 
kubenswrapper[4647]: I1128 15:24:53.547032 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.553250 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848" exitCode=0 Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.553303 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.553329 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"fc355af2b5023259bff2abf7c649dbb01de73ec932f8f0f10ca65ad1e7140ce4"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.556160 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerStarted","Data":"eafc61f004c4e7339e2a5efb9fa329a79f1262dde67f079d92c42612c1a697c2"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.561010 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerStarted","Data":"c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.561056 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerStarted","Data":"11e3dd7b85c0d89ac1b2c296d3fb87e044a5f043de7cd7a7834428385071a875"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.567961 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-k7msq" event={"ID":"02426497-e7d0-4982-a129-e5715ad55cd1","Type":"ContainerStarted","Data":"3f8fc9a64e7dbce67c532cb0391fae47fa2499555005333497b3bd96659e279a"} Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.577282 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.585297 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.594652 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/008f163b-b2fe-4238-90b5-96f0d89f3fb5-mcd-auth-proxy-config\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.595381 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.616365 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.636740 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.657183 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.680693 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: E1128 15:24:53.690299 4647 projected.go:288] Couldn't get configMap openshift-machine-config-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.696661 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.711480 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.729884 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec
8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.730563 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 15:24:53 crc kubenswrapper[4647]: E1128 15:24:53.741073 4647 projected.go:194] Error preparing data for projected volume kube-api-access-qvh5c for pod openshift-machine-config-operator/machine-config-daemon-7mwt4: failed to sync configmap cache: timed out waiting for the condition Nov 28 15:24:53 crc kubenswrapper[4647]: E1128 15:24:53.741167 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c podName:008f163b-b2fe-4238-90b5-96f0d89f3fb5 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:54.241148823 +0000 UTC m=+24.088755244 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-qvh5c" (UniqueName: "kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c") pod "machine-config-daemon-7mwt4" (UID: "008f163b-b2fe-4238-90b5-96f0d89f3fb5") : failed to sync configmap cache: timed out waiting for the condition Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.756292 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.778698 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.800114 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.818793 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.838398 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.854181 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.867629 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.881874 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.899502 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.918391 4647 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.926520 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount
\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645
bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.950594 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.969769 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:53 crc kubenswrapper[4647]: I1128 15:24:53.987450 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.004876 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.016891 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.282721 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283212 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:24:58.28316141 +0000 UTC m=+28.130767911 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.283479 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.283519 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.283549 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.283569 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvh5c\" (UniqueName: \"kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283584 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.283591 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283713 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283731 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:58.283638203 +0000 UTC m=+28.131244644 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283760 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283784 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:58.283764527 +0000 UTC m=+28.131370948 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283796 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283810 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283760 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283953 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283872 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:58.283852939 +0000 UTC m=+28.131459360 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.283987 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.284075 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:24:58.284053135 +0000 UTC m=+28.131659556 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.288868 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvh5c\" (UniqueName: \"kubernetes.io/projected/008f163b-b2fe-4238-90b5-96f0d89f3fb5-kube-api-access-qvh5c\") pod \"machine-config-daemon-7mwt4\" (UID: \"008f163b-b2fe-4238-90b5-96f0d89f3fb5\") " pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.350086 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:24:54 crc kubenswrapper[4647]: W1128 15:24:54.363854 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod008f163b_b2fe_4238_90b5_96f0d89f3fb5.slice/crio-0e1a3469618ecc787df7585c60258552785e8c998147a966ceba78cc45be5157 WatchSource:0}: Error finding container 0e1a3469618ecc787df7585c60258552785e8c998147a966ceba78cc45be5157: Status 404 returned error can't find the container with id 0e1a3469618ecc787df7585c60258552785e8c998147a966ceba78cc45be5157 Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.393510 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.393613 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.393532 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.393751 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.393877 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:24:54 crc kubenswrapper[4647]: E1128 15:24:54.393936 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.520212 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-dg57z"] Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.520569 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.523129 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.523145 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.523271 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.526796 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.541380 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.557126 
4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.578945 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.579200 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.579215 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.579227 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} Nov 28 
15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.579241 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.579323 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.580051 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"mult
us-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.581007 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9" exitCode=0 Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.581111 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.582991 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-k7msq" event={"ID":"02426497-e7d0-4982-a129-e5715ad55cd1","Type":"ContainerStarted","Data":"b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.586234 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0bf7a521-f9a3-4e6b-a455-337a6a6db730-serviceca\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.586293 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0bf7a521-f9a3-4e6b-a455-337a6a6db730-host\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.586383 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpfgv\" (UniqueName: \"kubernetes.io/projected/0bf7a521-f9a3-4e6b-a455-337a6a6db730-kube-api-access-xpfgv\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.589260 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.589329 4647 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.589345 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"0e1a3469618ecc787df7585c60258552785e8c998147a966ceba78cc45be5157"} Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.597376 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.624974 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.646757 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-
kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.659993 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.676565 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.686973 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0bf7a521-f9a3-4e6b-a455-337a6a6db730-host\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.687082 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpfgv\" (UniqueName: \"kubernetes.io/projected/0bf7a521-f9a3-4e6b-a455-337a6a6db730-kube-api-access-xpfgv\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.687152 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0bf7a521-f9a3-4e6b-a455-337a6a6db730-serviceca\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.687156 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0bf7a521-f9a3-4e6b-a455-337a6a6db730-host\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.688289 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0bf7a521-f9a3-4e6b-a455-337a6a6db730-serviceca\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.690894 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.707634 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.710816 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpfgv\" (UniqueName: \"kubernetes.io/projected/0bf7a521-f9a3-4e6b-a455-337a6a6db730-kube-api-access-xpfgv\") pod \"node-ca-dg57z\" (UID: \"0bf7a521-f9a3-4e6b-a455-337a6a6db730\") " pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.722713 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.736184 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.748378 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.763432 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.778133 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.793225 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.805193 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.819935 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.831896 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-dg57z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.836262 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\
\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: W1128 15:24:54.846647 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bf7a521_f9a3_4e6b_a455_337a6a6db730.slice/crio-f5e4ec76fd5f1af153ddc8a9ee633933cab871552412a01679d35977122dd743 WatchSource:0}: Error finding container f5e4ec76fd5f1af153ddc8a9ee633933cab871552412a01679d35977122dd743: Status 404 returned error can't find the container with id f5e4ec76fd5f1af153ddc8a9ee633933cab871552412a01679d35977122dd743 Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.857366 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.886369 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.901392 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.923632 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:54 crc kubenswrapper[4647]: I1128 15:24:54.949683 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.049821 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.098501 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.593811 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee" exitCode=0 Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.593878 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" 
event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee"} Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.596498 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-dg57z" event={"ID":"0bf7a521-f9a3-4e6b-a455-337a6a6db730","Type":"ContainerStarted","Data":"bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c"} Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.596540 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-dg57z" event={"ID":"0bf7a521-f9a3-4e6b-a455-337a6a6db730","Type":"ContainerStarted","Data":"f5e4ec76fd5f1af153ddc8a9ee633933cab871552412a01679d35977122dd743"} Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.619343 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.634282 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.645436 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.659264 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.678178 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.705743 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.727626 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.743183 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.765767 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.780209 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.797180 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.812345 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.827756 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.840473 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.853404 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.866193 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.887532 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.900832 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.921327 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.932360 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.944162 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.961381 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.981869 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:24:55 crc kubenswrapper[4647]: I1128 15:24:55.998105 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.017314 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.030933 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.393389 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.393455 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.393549 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.393745 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.393812 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.394086 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.478209 4647 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.480158 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.480219 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.480232 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.480383 4647 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.488037 4647 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.488484 4647 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.489937 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.489983 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.489994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.490013 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.490027 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.508487 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.516738 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.516783 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.516793 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.516809 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.516820 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.527708 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.530733 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.530777 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.530789 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.530807 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.530819 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.541843 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.544982 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.545027 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.545039 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.545055 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.545067 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.555887 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.559218 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.559262 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.559275 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.559295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.559307 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.569783 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: E1128 15:24:56.569956 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.571563 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.571592 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.571603 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.571621 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.571632 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.604672 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.607466 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9" exitCode=0 Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.607523 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.654899 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.673999 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.674039 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.674048 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.674063 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.674073 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.680554 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc47827
4c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.704222 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.728129 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.739690 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.749337 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.788712 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.788766 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.788780 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.788802 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.788817 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.791659 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.807765 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.827363 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.840506 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.861551 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.878280 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892414 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:56Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892557 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892571 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892579 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892592 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.892602 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.996399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.996475 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.996494 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.996518 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:56 crc kubenswrapper[4647]: I1128 15:24:56.996536 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:56Z","lastTransitionTime":"2025-11-28T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.099341 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.099399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.099410 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.099445 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.099457 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.202136 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.202205 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.202226 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.202252 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.202269 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.221416 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.233740 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.238293 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.243494 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.258864 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.273931 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.293137 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.304853 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.304891 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.304906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.304926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.304941 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.311974 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a58
03f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.328665 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a0
97a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.342318 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.355946 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.368015 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.380345 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.394841 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.408088 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.408138 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.408150 4647 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.408173 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.408187 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.414065 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.428662 4647 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.441915 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.456998 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.469783 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.483032 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.496801 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.511575 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.511622 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.511634 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.511653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.511668 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.512830 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.532069 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.558245 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.575125 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.589172 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.602189 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613212 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613249 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613258 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613274 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613283 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.613915 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330" exitCode=0 Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.614737 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.622732 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.636389 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.654981 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.672187 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.683905 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.695016 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.709679 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.715674 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.715731 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.715746 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.715768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.715783 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.728113 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:
24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.751543 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.767537 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-
kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.782305 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacco
unt\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.794104 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.808884 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823674 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823733 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823744 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823765 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823779 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.823893 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.839615 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.854265 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.868021 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.926027 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.926060 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.926070 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.926086 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:57 crc kubenswrapper[4647]: I1128 15:24:57.926098 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:57Z","lastTransitionTime":"2025-11-28T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.028536 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.028578 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.028586 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.028600 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.028610 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.132576 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.132644 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.132673 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.132708 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.132735 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.235108 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.235144 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.235152 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.235166 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.235177 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.326992 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.327481 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.327525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.327562 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.327592 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327710 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327711 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327720 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327823 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327854 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:24:58 crc kubenswrapper[4647]: 
E1128 15:24:58.327720 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327933 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327971 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.327714 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:25:06.327677276 +0000 UTC m=+36.175283737 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.328065 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:06.328034486 +0000 UTC m=+36.175640967 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.328105 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:06.328089367 +0000 UTC m=+36.175695878 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.328139 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:06.328126068 +0000 UTC m=+36.175732579 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.328179 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:06.328163279 +0000 UTC m=+36.175769730 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.337908 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.337934 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.337943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.337957 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.337967 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.393967 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.394006 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.394081 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.394075 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.394170 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:24:58 crc kubenswrapper[4647]: E1128 15:24:58.394229 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.440601 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.440644 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.440656 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.440705 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.440717 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.622169 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.622625 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.626204 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39" exitCode=0
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.626244 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.644279 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.646233 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.646280 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.646294 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.646314 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.646327 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.665744 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.667771 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.685067 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.706104 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.724338 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.748231 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.750572 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.750630 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.750650 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.750675 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.750693 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.766871 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.781766 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.798479 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.814990 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.828362 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.839414 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.852534 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.853510 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.853556 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.853566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.853582 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.853592 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.871340 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"i
mageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":
\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.885706 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.899236 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.916638 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.931670 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.945714 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e
6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.956239 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.956282 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.956295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.956314 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.956327 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:58Z","lastTransitionTime":"2025-11-28T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.960009 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.972074 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.980662 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:58 crc kubenswrapper[4647]: I1128 15:24:58.993788 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.009522 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\
"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.027082 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.040771 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058395 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058668 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058895 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058916 4647 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.058936 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.072306 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.162593 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.162899 4647 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.163064 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.163136 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.163338 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.266743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.267025 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.267150 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.267237 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.267314 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.370386 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.370711 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.370772 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.370844 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.370900 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.473680 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.473730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.473741 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.473764 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.473777 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.577372 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.577528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.577990 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.578087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.578382 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.630467 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.631038 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.656830 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.679276 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.682354 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.682459 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.682480 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.682511 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc 
kubenswrapper[4647]: I1128 15:24:59.682533 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.701810 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":
\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.722143 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\
"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.742536 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.761555 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.779130 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.784862 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.784897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.784909 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.784924 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.784934 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.797917 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.822338 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.846207 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dd
e1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.869690 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.887043 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.887123 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.887143 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.887173 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 
15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.887197 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.889121 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.907182 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.927510 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.948534 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:24:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.990254 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.990344 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.990363 4647 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.990401 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:24:59 crc kubenswrapper[4647]: I1128 15:24:59.990459 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:24:59Z","lastTransitionTime":"2025-11-28T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.098829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.098917 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.098942 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.098973 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.099005 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.202635 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.202717 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.202743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.202778 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.202803 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.305664 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.305716 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.305733 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.305755 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.305770 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.393917 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.393924 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:00 crc kubenswrapper[4647]: E1128 15:25:00.394091 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:00 crc kubenswrapper[4647]: E1128 15:25:00.394227 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.394608 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:00 crc kubenswrapper[4647]: E1128 15:25:00.394702 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.409320 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.409366 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.409380 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.409397 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.409434 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.415659 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.434803 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.452157 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.467942 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.485802 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.499927 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514340 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514389 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514398 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514429 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514439 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.514953 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.529361 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.546982 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.565907 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda8
72649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.591931 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.606469 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.616822 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.616900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.616920 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.616936 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.616949 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.621245 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.636132 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" 
event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerStarted","Data":"b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.636177 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.638084 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.719519 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.719562 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.719589 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 
15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.719606 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.719616 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.822655 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.822730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.822743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.822768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.822785 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.927442 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.927926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.927937 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.927954 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:00 crc kubenswrapper[4647]: I1128 15:25:00.927973 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:00Z","lastTransitionTime":"2025-11-28T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.032078 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.032127 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.032139 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.032160 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.032174 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.134941 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.134991 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.135003 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.135025 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.135041 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.238394 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.238464 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.238475 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.238494 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.238510 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.340592 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.340652 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.340663 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.340681 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.340693 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.444202 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.444257 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.444269 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.444288 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.444303 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.548260 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.548356 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.548379 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.548445 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.548470 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.643602 4647 generic.go:334] "Generic (PLEG): container finished" podID="817b3066-f5d4-49c6-a9b0-f621261d5f81" containerID="b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b" exitCode=0 Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.643681 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerDied","Data":"b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.643908 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.654900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.654948 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.654957 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.654973 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.654988 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.681180 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.704384 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.727509 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.751254 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.757396 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.757511 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.757529 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.757556 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.757575 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.767345 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.783700 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.802894 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.863931 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.864830 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.864864 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.864878 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.864900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.864915 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.879897 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.894393 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.910106 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.930010 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.952082 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.967877 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.967924 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.967937 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.967963 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.967978 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:01Z","lastTransitionTime":"2025-11-28T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:01 crc kubenswrapper[4647]: I1128 15:25:01.971545 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-
socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:01Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.070642 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.070691 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.070703 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.070724 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.070739 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.174059 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.174125 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.174143 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.174168 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.174188 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.277206 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.277263 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.277281 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.277310 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.277329 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.381810 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.381880 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.381903 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.381926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.381940 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.394258 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.394306 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.394278 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:02 crc kubenswrapper[4647]: E1128 15:25:02.394430 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:02 crc kubenswrapper[4647]: E1128 15:25:02.394553 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:02 crc kubenswrapper[4647]: E1128 15:25:02.394635 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.484528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.484591 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.484668 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.484713 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.484734 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.588026 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.588100 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.588120 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.588150 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.588170 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.691044 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.691128 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.691153 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.691188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.691214 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.794800 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.794860 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.794880 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.794907 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.794926 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.897773 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.897830 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.897846 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.897871 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:02 crc kubenswrapper[4647]: I1128 15:25:02.897886 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:02Z","lastTransitionTime":"2025-11-28T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.001401 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.001481 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.001493 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.001516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.001529 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.105398 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.105468 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.105483 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.105502 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.105514 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.209066 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.209131 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.209146 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.209168 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.209185 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.312313 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.312387 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.312405 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.312451 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.312470 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.416619 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.417403 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.417574 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.417730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.417883 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.520479 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.520521 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.520533 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.520550 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.520565 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.622400 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.622465 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.622474 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.622488 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.622497 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.654743 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" event={"ID":"817b3066-f5d4-49c6-a9b0-f621261d5f81","Type":"ContainerStarted","Data":"9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.672209 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.684117 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.697652 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.708953 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.724446 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.724477 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.724487 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.724501 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.724510 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.726637 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3f
ea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.758988 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dd
e1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.775367 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.788505 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.799793 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.815110 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827157 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827201 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827222 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827238 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827247 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.827582 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.844297 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.858198 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.870295 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:03Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.929463 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.929510 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.929522 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.929539 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:03 crc kubenswrapper[4647]: I1128 15:25:03.929553 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:03Z","lastTransitionTime":"2025-11-28T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.032406 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.032508 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.032525 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.032547 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.032566 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.135788 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.135857 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.135876 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.135908 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.135928 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.239393 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.239453 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.239462 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.239477 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.239489 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.342396 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.342458 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.342468 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.342484 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.342494 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.394063 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.394081 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:04 crc kubenswrapper[4647]: E1128 15:25:04.394229 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.394319 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:04 crc kubenswrapper[4647]: E1128 15:25:04.394456 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:04 crc kubenswrapper[4647]: E1128 15:25:04.394572 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.445305 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.445357 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.445369 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.445389 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.445405 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.549666 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.549766 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.549787 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.549814 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.549831 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.652750 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.652800 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.652811 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.652827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.652838 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.660932 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/0.log" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.666472 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602" exitCode=1 Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.667495 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.668646 4647 scope.go:117] "RemoveContainer" containerID="e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.692108 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dd
e1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00
e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.705773 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e8
0159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastStat
e\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.721075 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.732161 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.744589 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755685 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755859 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755889 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755904 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755923 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.755935 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.769354 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3f
ea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.784818 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.801237 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.813949 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.829639 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.844577 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.858743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.858801 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.858816 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.859042 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.859065 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.862823 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.878495 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.929843 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2"] Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.930597 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:04 crc kubenswrapper[4647]: W1128 15:25:04.932147 4647 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 28 15:25:04 crc kubenswrapper[4647]: E1128 15:25:04.932215 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:25:04 crc kubenswrapper[4647]: W1128 15:25:04.932879 4647 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 28 15:25:04 crc kubenswrapper[4647]: E1128 15:25:04.932904 4647 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.949533 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.961213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.961263 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.961276 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.961295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.961307 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:04Z","lastTransitionTime":"2025-11-28T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.967201 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.983327 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:04 crc kubenswrapper[4647]: I1128 15:25:04.997853 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:04Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.006116 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxt6r\" (UniqueName: \"kubernetes.io/projected/3c61497d-3cda-498b-94fc-372dc6030924-kube-api-access-bxt6r\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.006163 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.006198 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.006236 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-env-overrides\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: 
\"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.013323 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.024394 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.034059 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.046716 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.063260 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.063314 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.063327 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.063346 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.063358 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.068793 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.086452 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dd
e1774c71add39493c2aef602\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00
e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.100158 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e8
0159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastStat
e\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.106980 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.107043 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-env-overrides\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.107073 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxt6r\" (UniqueName: \"kubernetes.io/projected/3c61497d-3cda-498b-94fc-372dc6030924-kube-api-access-bxt6r\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.107092 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.107786 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.107835 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c61497d-3cda-498b-94fc-372dc6030924-env-overrides\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.117255 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.134954 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.138748 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxt6r\" (UniqueName: \"kubernetes.io/projected/3c61497d-3cda-498b-94fc-372dc6030924-kube-api-access-bxt6r\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 
15:25:05.148784 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.164526 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.166221 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc 
kubenswrapper[4647]: I1128 15:25:05.166250 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.166259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.166274 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.166283 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.268844 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.268875 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.268885 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.268899 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.268908 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.371925 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.371967 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.371979 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.371995 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.372008 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.476827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.476887 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.476906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.476977 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.476995 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.580527 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.580600 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.580621 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.580659 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.580678 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.677721 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/0.log" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.680958 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.681157 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.684038 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.684111 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.684130 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.684158 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.684178 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.703323 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.722346 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.743295 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.816259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.816310 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.816320 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.816341 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.816353 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.818377 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.839343 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf
9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binar
y-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated
\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.863029 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b8
51efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.881948 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.896352 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.910634 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.919169 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.919211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.919222 4647 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.919240 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.919252 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:05Z","lastTransitionTime":"2025-11-28T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.922701 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.935958 4647 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.949586 4647 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.963839 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.977748 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:05 crc kubenswrapper[4647]: I1128 15:25:05.990762 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.021742 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.021873 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.021889 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.021911 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.021928 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.107599 4647 secret.go:188] Couldn't get secret openshift-ovn-kubernetes/ovn-control-plane-metrics-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.107740 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert podName:3c61497d-3cda-498b-94fc-372dc6030924 nodeName:}" failed. 
No retries permitted until 2025-11-28 15:25:06.607711487 +0000 UTC m=+36.455317918 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ovn-control-plane-metrics-cert" (UniqueName: "kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert") pod "ovnkube-control-plane-749d76644c-55cn2" (UID: "3c61497d-3cda-498b-94fc-372dc6030924") : failed to sync secret cache: timed out waiting for the condition Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.125301 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.125363 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.125379 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.125403 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.125437 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.188127 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.227975 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.228019 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.228030 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.228045 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.228056 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.331679 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.331750 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.331768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.331795 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.331813 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.394316 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.394628 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.394902 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.394980 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.395158 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.395343 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.416083 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.421670 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.421801 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.421938 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.421936 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.421872284 +0000 UTC m=+52.269478745 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.421964 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422037 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.422095 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422125 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.422100051 +0000 UTC m=+52.269706472 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.422165 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422222 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422241 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.422245 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422257 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422362 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422307 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422396 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.422384599 +0000 UTC m=+52.269991020 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422487 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.422471091 +0000 UTC m=+52.270077542 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.422528 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.422515892 +0000 UTC m=+52.270122353 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.434818 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.434875 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.434887 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.434905 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.434919 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.538155 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.538188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.538197 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.538221 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.538230 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.624602 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.630713 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c61497d-3cda-498b-94fc-372dc6030924-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-55cn2\" (UID: \"3c61497d-3cda-498b-94fc-372dc6030924\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.641492 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.641524 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.641537 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.641554 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.641566 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.643051 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.643077 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.643088 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.643102 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.643113 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.663583 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.669723 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.669787 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.669802 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.669827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.669851 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.693389 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.698534 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.698591 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.698611 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.698639 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.698656 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.719927 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.725865 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.726592 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.726637 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.726732 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.726782 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.743912 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.748993 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.755765 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.755826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.755845 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.755871 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.755887 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.787541 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.788136 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.794161 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.794221 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.794234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.794256 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.795599 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.866556 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-cz6sq"] Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.867913 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:06 crc kubenswrapper[4647]: E1128 15:25:06.868025 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.928923 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.928984 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78srd\" (UniqueName: \"kubernetes.io/projected/a79b0b39-cffb-4ac3-a526-837c6aa70616-kube-api-access-78srd\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.931728 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.931762 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.931772 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.931789 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.931805 4647 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:06Z","lastTransitionTime":"2025-11-28T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.933766 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.947603 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.964791 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.976000 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:06 crc kubenswrapper[4647]: I1128 15:25:06.989244 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.003539 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.026339 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.029978 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78srd\" (UniqueName: \"kubernetes.io/projected/a79b0b39-cffb-4ac3-a526-837c6aa70616-kube-api-access-78srd\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.030325 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:07 crc kubenswrapper[4647]: E1128 15:25:07.030484 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:07 crc kubenswrapper[4647]: E1128 15:25:07.030582 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:07.530558247 +0000 UTC m=+37.378164668 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.037381 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.037448 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.037461 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.037479 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.037492 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.040768 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\
\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.058433 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78srd\" (UniqueName: \"kubernetes.io/projected/a79b0b39-cffb-4ac3-a526-837c6aa70616-kube-api-access-78srd\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.059120 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.074389 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.090905 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.105207 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.123718 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.135883 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.140215 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.140249 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.140258 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.140297 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.140311 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.151172 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.164866 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" 
Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.243246 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.243319 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.243333 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.243364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.243381 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.346173 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.346246 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.346270 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.346300 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.346318 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.449505 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.449543 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.449551 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.449566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.449577 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.534906 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:07 crc kubenswrapper[4647]: E1128 15:25:07.535100 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:07 crc kubenswrapper[4647]: E1128 15:25:07.535207 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:08.535175226 +0000 UTC m=+38.382781647 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.551860 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.551889 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.551897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.551911 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.551922 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.654758 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.654799 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.654812 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.654829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.654841 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.691704 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" event={"ID":"3c61497d-3cda-498b-94fc-372dc6030924","Type":"ContainerStarted","Data":"3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.691759 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" event={"ID":"3c61497d-3cda-498b-94fc-372dc6030924","Type":"ContainerStarted","Data":"2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.691776 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" event={"ID":"3c61497d-3cda-498b-94fc-372dc6030924","Type":"ContainerStarted","Data":"f056503576c90593a55aab8b8d6a40483c1484980273b70692945fb34b2573dd"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.694775 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/1.log" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.696017 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/0.log" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.703144 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b" exitCode=1 Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.703209 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.703294 4647 scope.go:117] "RemoveContainer" containerID="e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.706443 4647 scope.go:117] 
"RemoveContainer" containerID="594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b" Nov 28 15:25:07 crc kubenswrapper[4647]: E1128 15:25:07.707824 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.716258 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.733669 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.750813 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.757142 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.757167 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.757174 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.757190 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.757199 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.764600 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2a
f0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.774998 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" 
Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.786850 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.800830 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.816359 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.832334 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.846593 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.859705 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.859746 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.859759 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.859778 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.859789 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.865863 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.896864 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.919876 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.935968 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.954763 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.962680 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.962719 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.962731 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.962751 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.962765 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:07Z","lastTransitionTime":"2025-11-28T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.967428 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:07 crc kubenswrapper[4647]: I1128 15:25:07.980391 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.038076 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.054299 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.066884 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.066960 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.066983 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.067012 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.067032 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.071850 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.111235 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.141313 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.169852 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.169893 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.169906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.169928 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 
15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.169943 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.173006 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.191443 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.203496 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.216082 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.228380 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.241003 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.252074 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.262656 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.272220 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.272255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.272271 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.272292 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.272309 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.273086 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.284228 4647 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.374827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.374858 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 
15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.374870 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.374882 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.374891 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.393256 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.393365 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.393693 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.393746 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.393798 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.393850 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.394197 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.394325 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.478093 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.478269 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.478290 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.478316 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.478336 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.544894 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.545105 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:08 crc kubenswrapper[4647]: E1128 15:25:08.545201 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:10.545179157 +0000 UTC m=+40.392785578 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.581479 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.581563 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.581590 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.581619 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.581641 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.684891 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.684972 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.684995 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.685025 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.685044 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.708365 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/1.log" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.787192 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.787255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.787266 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.787296 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.787310 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.890062 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.890090 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.890098 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.890114 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.890126 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.993513 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.993572 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.993585 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.993610 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:08 crc kubenswrapper[4647]: I1128 15:25:08.993626 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:08Z","lastTransitionTime":"2025-11-28T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.097350 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.097439 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.097459 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.097484 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.097504 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.200979 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.201035 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.201047 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.201064 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.201075 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.305030 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.305091 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.305108 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.305136 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.305154 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.408821 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.408912 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.408938 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.408976 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.409534 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.520002 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.520214 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.520251 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.520288 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.520328 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.624338 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.624809 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.625039 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.625310 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.625598 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.729314 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.729702 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.729793 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.729898 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.729993 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.832882 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.833873 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.833971 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.834066 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.834150 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.937653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.938167 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.938364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.938635 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:09 crc kubenswrapper[4647]: I1128 15:25:09.938814 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:09Z","lastTransitionTime":"2025-11-28T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.041683 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.041728 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.041737 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.041753 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.041767 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.146006 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.146087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.146109 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.146141 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.146159 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.249490 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.249553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.249566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.249587 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.249601 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.352669 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.352773 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.352802 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.352839 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.352871 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.393840 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.393950 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.393957 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.394012 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.394703 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.394934 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.395222 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.395386 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
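The repeated setters.go:603 entries above show the kubelet rewriting the node's Ready condition to False while no CNI configuration exists. For reference, a minimal Go sketch of the condition object being recorded, built with the upstream k8s.io/api and k8s.io/apimachinery modules; this is an editorial illustration, not code from this log:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Reconstruct the condition={...} payload from the entries above.
	now := metav1.NewTime(time.Date(2025, 11, 28, 15, 25, 10, 0, time.UTC))
	cond := corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse, // "status":"False"
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	b, err := json.Marshal(cond)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // same shape as the logged condition
}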
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.437892 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
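Every status patch above fails the same way: the node-identity webhook's serving certificate expired on 2025-08-24T17:21:41Z while the kubelet's clock reads 2025-11-28. The exact error text comes from Go's standard crypto/x509 chain verification. A self-contained Go sketch that reproduces it, using stdlib only; the certificate subject and NotBefore date here are invented for illustration, only the NotAfter and verification times are taken from the log:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Self-signed cert whose NotAfter matches the expiry reported above.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "network-node-identity (illustrative)"},
		NotBefore:             time.Date(2025, 2, 24, 17, 21, 41, 0, time.UTC), // assumed
		NotAfter:              time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC), // from the log
		IsCA:                  true,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	roots.AddCert(cert)
	// Verify at the kubelet's wall-clock time from the log entries.
	_, err = cert.Verify(x509.VerifyOptions{
		Roots:       roots,
		CurrentTime: time.Date(2025, 11, 28, 15, 25, 10, 0, time.UTC),
	})
	fmt.Println(err)
	// x509: certificate has expired or is not yet valid:
	// current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z
}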
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.456243 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.456284 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.456297 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.456315 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.456328 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.459630 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.476596 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.493600 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.517841 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.558487 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.558509 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.558523 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.563566 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b8
51efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.569559 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.569859 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:10 crc kubenswrapper[4647]: E1128 15:25:10.569975 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:14.56994509 +0000 UTC m=+44.417551551 (durationBeforeRetry 4s). 
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.591015 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.608193 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.632504 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.661871 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.662360 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.662534 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.662653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.662744 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.665435 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.686848 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.703377 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.715091 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.732436 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.747605 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.765772 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.765822 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.765848 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.765877 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.765900 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.868527 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.868574 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.868590 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.868615 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.868630 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.972653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.972729 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.972751 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.972780 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:10 crc kubenswrapper[4647]: I1128 15:25:10.972802 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:10Z","lastTransitionTime":"2025-11-28T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.074924 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.075202 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.075275 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.075344 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.075404 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.179239 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.179640 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.179802 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.179965 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.180304 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.283381 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.283936 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.284149 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.284376 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.285044 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.388795 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.389201 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.389371 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.389618 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.389798 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.493211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.493259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.493272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.493289 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.493300 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.596276 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.596745 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.596928 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.597108 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.597283 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.700234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.700539 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.700661 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.700778 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.700873 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.803139 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.803182 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.803190 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.803206 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.803216 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.905874 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.906504 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.906576 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.906659 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:11 crc kubenswrapper[4647]: I1128 15:25:11.906730 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:11Z","lastTransitionTime":"2025-11-28T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.010128 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.010188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.010204 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.010226 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.010242 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.113128 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.113177 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.113192 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.113207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.113218 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.216105 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.216165 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.216177 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.216200 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.216219 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.318969 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.319010 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.319024 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.319044 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.319056 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.396708 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.396743 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.396706 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.396837 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:12 crc kubenswrapper[4647]: E1128 15:25:12.396898 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:12 crc kubenswrapper[4647]: E1128 15:25:12.397018 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:12 crc kubenswrapper[4647]: E1128 15:25:12.397073 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:12 crc kubenswrapper[4647]: E1128 15:25:12.397154 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.421808 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.421856 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.421873 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.421897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.421916 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.525516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.525595 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.525621 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.525659 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.525686 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.630341 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.630784 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.630819 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.630846 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.630861 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.734523 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.734591 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.734611 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.734643 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.734663 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.838457 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.838528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.838543 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.838569 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.838585 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.941878 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.942814 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.942842 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.943100 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:12 crc kubenswrapper[4647]: I1128 15:25:12.943132 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:12Z","lastTransitionTime":"2025-11-28T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-line sequence (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready" with the identical NetworkPluginNotReady message) repeats roughly every 100 ms from 15:25:13.047 through 15:25:14.294 ...]
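The status block above recurs with only the timestamps changing, about ten times per second. To confirm the cadence and count the transitions from a saved copy of this log, a minimal parser sketch (the file path and regex are illustrative assumptions, not taken from the log itself):

```python
import re
from collections import Counter

# Matches the setters.go "Node became not ready" entries above and captures
# the heartbeat timestamp and reason embedded in the condition JSON.
PATTERN = re.compile(
    r'"Node became not ready".*?"lastHeartbeatTime":"([^"]+)".*?"reason":"([^"]+)"'
)

def summarize(path="kubelet.log"):  # path is an assumption, not from the log
    timestamps, reasons = [], Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            m = PATTERN.search(line)
            if m:
                timestamps.append(m.group(1))
                reasons[m.group(2)] += 1
    if timestamps:
        print(f"{len(timestamps)} NotReady heartbeats, "
              f"{timestamps[0]} .. {timestamps[-1]}")
        for reason, n in reasons.most_common():
            print(f"  reason {reason}: {n}")
    else:
        print("no 'Node became not ready' entries found")

if __name__ == "__main__":
    summarize()
```

Run against this log it should report a single reason, KubeletNotReady, for every heartbeat in the window shown.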
Nov 28 15:25:14 crc kubenswrapper[4647]: I1128 15:25:14.395530 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:14 crc kubenswrapper[4647]: I1128 15:25:14.395590 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.395826 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:14 crc kubenswrapper[4647]: I1128 15:25:14.395877 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:14 crc kubenswrapper[4647]: I1128 15:25:14.395937 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.396120 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.396332 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.396490 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... identical NotReady status entries at 15:25:14.399, .503 and .607 ...]
Nov 28 15:25:14 crc kubenswrapper[4647]: I1128 15:25:14.617792 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.617979 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 15:25:14 crc kubenswrapper[4647]: E1128 15:25:14.618074 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:22.618048038 +0000 UTC m=+52.465654499 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered
[... identical NotReady status entries at 15:25:14.711 and .814 ...]
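The "durationBeforeRetry 8s" in the mount failure above is consistent with kubelet's doubling retry schedule for failed volume operations. Assuming the usual defaults of a 500 ms initial delay, factor 2, and a cap of roughly two minutes (an assumption about kubelet internals, not stated in this log), the 8 s wait corresponds to the fifth failure of this operation since the kubelet started (m=+52 s):

```python
from itertools import count

# Reproduces the doubling retry schedule implied by "durationBeforeRetry 8s".
# Assumed defaults: 500 ms initial delay, factor 2, cap of 2m2s -- these are
# assumptions about kubelet's exponential backoff, not values from the log.
INITIAL_S, FACTOR, CAP_S = 0.5, 2, 122

def backoff_schedule():
    delay = INITIAL_S
    for attempt in count(1):
        yield attempt, delay
        delay = min(delay * FACTOR, CAP_S)

for attempt, delay in backoff_schedule():
    print(f"attempt {attempt}: wait {delay:g}s")
    if delay >= CAP_S:
        break
```

Under those assumptions the schedule runs 0.5, 1, 2, 4, 8 ... seconds, so the operation shown here has already failed and been retried several times before this entry.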
[... identical NotReady status entries continue roughly every 100 ms from 15:25:14.919 through 15:25:16.313 ...]
Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.394095 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.394194 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.394218 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.394187 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.394504 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.394559 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.394733 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.394779 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
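All four pod sync failures trace back to the same missing CNI configuration directory named in the error message. A quick check of what, if anything, is present there (the directory path is quoted from the log; everything else in the sketch is illustrative):

```python
import json
import os

# The sync errors above all point at this directory being empty or absent;
# the path comes straight from the NetworkPluginNotReady message.
CNI_DIR = "/etc/kubernetes/cni/net.d/"

def list_cni_configs(path=CNI_DIR):
    if not os.path.isdir(path):
        print(f"{path} does not exist")
        return
    entries = sorted(os.listdir(path))
    if not entries:
        print(f"{path} is empty -- matches the NetworkPluginNotReady error")
        return
    for name in entries:
        if name.endswith((".conf", ".conflist", ".json")):
            with open(os.path.join(path, name)) as fh:
                conf = json.load(fh)
            print(f"{name}: name={conf.get('name')} cniVersion={conf.get('cniVersion')}")
        else:
            print(f"{name}: skipped (not a CNI config extension)")

if __name__ == "__main__":
    list_cni_configs()
```

On a healthy node the network operator writes at least one .conf or .conflist file here; until it does, every pod without host networking will keep failing to sync exactly as shown above.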
[... identical NotReady status entries at 15:25:16.416, .519, .623, .726, .829 and .864 ...]
Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.883285 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[... four condition objects mirroring the heartbeat entries above: MemoryPressure, DiskPressure and PIDPressure \\\"False\\\", Ready \\\"False\\\" with the same NetworkPluginNotReady message ...],\\\"images\\\":[... several dozen image entries with sha256 digests elided ...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:16Z is after 2025-08-24T17:21:41Z"
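The status patch is rejected because the node.network-node-identity webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z. A sketch that fetches the serving certificate and prints its validity window, assuming the third-party cryptography package (version 42+ for the *_utc accessors); host and port are taken from the URL in the error message:

```python
import ssl
import datetime
from cryptography import x509  # pip install cryptography (>=42)

# Host and port come from the Post URL in the error above; everything else
# is an illustrative sketch, not part of any OpenShift tooling.
HOST, PORT = "127.0.0.1", 9743

def check_cert(host=HOST, port=PORT):
    # get_server_certificate with no CA bundle skips verification, so the
    # expired cert can still be fetched for inspection.
    pem = ssl.get_server_certificate((host, port))
    cert = x509.load_pem_x509_certificate(pem.encode())
    now = datetime.datetime.now(datetime.timezone.utc)
    print("subject:  ", cert.subject.rfc4514_string())
    print("notBefore:", cert.not_valid_before_utc)
    print("notAfter: ", cert.not_valid_after_utc)
    print("expired:  ", cert.not_valid_after_utc < now)

if __name__ == "__main__":
    check_cert()
```

Against this node it should report notAfter 2025-08-24T17:21:41Z and expired True, matching the x509 error the kubelet logs on every status-patch attempt.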
event="NodeHasNoDiskPressure" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.889318 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.889335 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.889348 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:16Z","lastTransitionTime":"2025-11-28T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.905889 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:16Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.912029 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.912121 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.912137 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.912207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.912222 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:16Z","lastTransitionTime":"2025-11-28T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.934760 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:16Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.939219 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.939295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.939321 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.939356 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.939380 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:16Z","lastTransitionTime":"2025-11-28T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.963549 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:16Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.968852 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.968944 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.968963 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.968997 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.969017 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:16Z","lastTransitionTime":"2025-11-28T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.991718 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:16Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:16 crc kubenswrapper[4647]: E1128 15:25:16.991956 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.994669 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.994714 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.994728 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.994749 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:16 crc kubenswrapper[4647]: I1128 15:25:16.994762 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:16Z","lastTransitionTime":"2025-11-28T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.099024 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.099125 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.099161 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.099201 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.099227 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.203036 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.203086 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.203102 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.203126 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.203143 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.306994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.307085 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.307109 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.307145 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.307173 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.411804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.411857 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.411870 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.411889 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.411902 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.514575 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.514644 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.514665 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.514695 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.514714 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.618144 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.618198 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.618216 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.618234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.618248 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.721486 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.721530 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.721539 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.721553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.721563 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.825097 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.825157 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.825173 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.825200 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.825216 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.928481 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.928557 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.928577 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.928612 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:17 crc kubenswrapper[4647]: I1128 15:25:17.928632 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:17Z","lastTransitionTime":"2025-11-28T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.031755 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.031820 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.031849 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.031883 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.031911 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.135252 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.135305 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.135323 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.135351 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.135370 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.241962 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.242037 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.242065 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.242097 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.242121 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.346089 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.346180 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.346204 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.346233 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.346256 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.394394 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.394490 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.394568 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:18 crc kubenswrapper[4647]: E1128 15:25:18.394642 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.394674 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:18 crc kubenswrapper[4647]: E1128 15:25:18.394780 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:18 crc kubenswrapper[4647]: E1128 15:25:18.394908 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:18 crc kubenswrapper[4647]: E1128 15:25:18.395095 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.450357 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.450460 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.450481 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.450507 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.450524 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.553619 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.553666 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.553679 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.553694 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.553703 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.649085 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.657092 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.657188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.657205 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.657272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.657296 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.666000 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.675588 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.691137 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.708915 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.731066 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.752031 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.761706 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.761801 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.761857 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.761890 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.761913 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.770282 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.787772 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.804764 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.834051 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.857136 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.865234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.865291 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.865310 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.865338 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 
15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.865358 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.874432 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.890714 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.910450 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.928238 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.944350 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.964112 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:18Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.970605 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.970668 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.970681 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.970700 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:18 crc kubenswrapper[4647]: I1128 15:25:18.970728 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:18Z","lastTransitionTime":"2025-11-28T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.073103 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.073179 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.073199 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.073229 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.073247 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.175456 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.175526 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.175550 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.175577 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.175597 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.278315 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.278381 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.278396 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.278482 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.278498 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.381797 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.381848 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.381859 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.381876 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.381886 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.484929 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.485076 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.485098 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.485161 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.485183 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.589900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.590261 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.590344 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.590455 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.590589 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.693598 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.693712 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.693737 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.693769 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.693795 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.796701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.796782 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.796800 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.796827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.796844 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.900095 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.900152 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.900169 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.900188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:19 crc kubenswrapper[4647]: I1128 15:25:19.900204 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:19Z","lastTransitionTime":"2025-11-28T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.004028 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.004099 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.004125 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.004156 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.004180 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.106982 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.107060 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.107083 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.107111 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.107133 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.210147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.211291 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.211389 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.211557 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.211650 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.315395 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.315565 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.315589 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.315626 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.315649 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.393518 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.393619 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.393504 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:20 crc kubenswrapper[4647]: E1128 15:25:20.393732 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.394130 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:20 crc kubenswrapper[4647]: E1128 15:25:20.397657 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.397631 4647 scope.go:117] "RemoveContainer" containerID="594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b" Nov 28 15:25:20 crc kubenswrapper[4647]: E1128 15:25:20.398082 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:20 crc kubenswrapper[4647]: E1128 15:25:20.398341 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.418207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.418258 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.418271 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.418292 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.418310 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.441977 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b8
51efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e707bbe4b663e93a588f8fb035a62d2f967e20dde1774c71add39493c2aef602\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 15:25:03.543847 5818 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.544278 5818 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544437 5818 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544644 5818 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544731 5818 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544787 5818 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:25:03.544881 5818 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:25:03.545404 5818 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.461062 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.481619 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.498117 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.511136 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.521735 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.521773 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.521785 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.521804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.521818 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.525930 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.543702 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.559593 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.576078 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.587449 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.598897 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.612346 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.622042 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.624459 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.624507 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.624524 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.624542 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.624554 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.635190 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.651584 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.669843 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.683265 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"pod
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.693354 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.702752 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.717330 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.726814 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.726843 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.726856 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.726873 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.726886 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.731482 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.734781 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.749384 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.761991 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/1.log" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.765609 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.766170 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.766696 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.786583 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0265
1050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 
ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.806292 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.825183 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.829899 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.829950 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.829962 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.829981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.829994 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.839142 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.850843 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.866670 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.885443 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.911922 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.927135 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.931701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.931888 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.931980 4647 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.932066 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.932134 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:20Z","lastTransitionTime":"2025-11-28T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.937941 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.947991 4647 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.958779 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.968083 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.980900 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:20 crc kubenswrapper[4647]: I1128 15:25:20.991167 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.003368 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.017375 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.029529 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.034611 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.034824 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.034911 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.034993 4647 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.035053 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.046884 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disab
led\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.062359 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\"
:\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"qua
y.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.1
1\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.081271 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volum
eMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn
\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.093987 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.116113 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.128423 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.138066 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.138118 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.138129 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.138147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.138159 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.141494 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.156913 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.171733 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.184793 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.241233 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.241262 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.241272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.241288 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.241298 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.344525 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.344604 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.344613 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.344628 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.344637 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.447273 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.447301 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.447308 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.447320 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.447329 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.550546 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.551082 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.551207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.551343 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.551614 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.654749 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.655311 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.655333 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.655364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.655386 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.758122 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.758172 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.758188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.758211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.758225 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.770662 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/2.log" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.771452 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/1.log" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.774955 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45" exitCode=1 Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.775009 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.775052 4647 scope.go:117] "RemoveContainer" containerID="594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.776058 4647 scope.go:117] "RemoveContainer" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45" Nov 28 15:25:21 crc kubenswrapper[4647]: E1128 15:25:21.776355 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.789525 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.800692 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.813027 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.825227 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.836603 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.847806 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.861235 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.861275 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.861294 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.861318 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.861334 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.864109 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.877068 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.889769 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.911653 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 
2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.927672 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.943399 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.964748 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:21Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.965695 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.965752 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.965774 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.965804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:21 crc kubenswrapper[4647]: I1128 15:25:21.965829 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:21Z","lastTransitionTime":"2025-11-28T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.013059 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.050374 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e
93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://594c6dc6d8ad8d416895a18caa2f3ec2494e08b851efe5413132a48ce072128b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"diagnostics/network-check-source-55646444c4-trplf\\\\nI1128 15:25:06.241273 5995 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nI1128 15:25:06.241276 5995 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI1128 15:25:06.241281 5995 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-7mwt4\\\\nF1128 15:25:06.241278 5995 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:06Z is after 2025-08-24T17:21:41Z]\\\\nI1128 15:25:06.241287 5995 ovn.go:134]\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling 
webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatus
es\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.068376 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.068447 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.068461 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.068483 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.068495 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.069208 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.083532 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.172898 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.172965 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.172992 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.173068 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.173085 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.275638 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.275698 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.275712 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.275733 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.275745 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.378943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.378974 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.378981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.379011 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.379023 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.393761 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.393924 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.394121 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.394680 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.394776 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.394851 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.395100 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.395375 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.432160 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:25:54.432110343 +0000 UTC m=+84.279716794 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.431991 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.433061 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.433356 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.433676 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:54.433649086 +0000 UTC m=+84.281255547 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.433937 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.434255 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.434299 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.434907 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.435097 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.434483 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.435465 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.435492 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.436218 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:54.436199687 +0000 UTC m=+84.283806148 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.436401 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:54.436378122 +0000 UTC m=+84.283984583 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.436614 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.436769 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:54.436740932 +0000 UTC m=+84.284347393 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.436622 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.482373 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.482853 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.482996 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.483137 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.483328 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.586062 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.586442 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.586540 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.586602 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.586658 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.639177 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.639302 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.639676 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:25:38.639638333 +0000 UTC m=+68.487244754 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.716943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.716989 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.716999 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.717015 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.717025 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.779788 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/2.log" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.784539 4647 scope.go:117] "RemoveContainer" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45" Nov 28 15:25:22 crc kubenswrapper[4647]: E1128 15:25:22.785855 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.796957 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.809483 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.819827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.819991 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.820053 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.820115 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.820170 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.826779 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.838097 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.849267 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 
2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.863951 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"pod
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.884915 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.905125 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.922694 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.922923 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.923088 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.923243 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.923371 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:22Z","lastTransitionTime":"2025-11-28T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.925132 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.952638 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e
93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.977106 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:22 crc kubenswrapper[4647]: I1128 15:25:22.992859 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:22Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.009372 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:23Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.023736 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:23Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.026467 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.026516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.026525 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.026540 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.026550 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.037835 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:23Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.051614 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:23Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.063138 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:23Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.129760 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.130132 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.130213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.130314 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.130393 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.233872 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.233939 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.233962 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.233994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.234016 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.337235 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.337670 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.337732 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.337775 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.337802 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.441312 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.441371 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.441387 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.441407 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.441443 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.544772 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.544831 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.544850 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.544875 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.544895 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.648062 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.648115 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.648135 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.648160 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.648181 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.750941 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.751012 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.751031 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.751058 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.751076 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.853778 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.853817 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.853830 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.853850 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.853863 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.957615 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.957694 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.957713 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.957740 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:23 crc kubenswrapper[4647]: I1128 15:25:23.957759 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:23Z","lastTransitionTime":"2025-11-28T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.060798 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.060876 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.060897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.060926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.060945 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.164695 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.164774 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.164792 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.164822 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.164842 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.268028 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.268094 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.268107 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.268126 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.268145 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.371927 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.371985 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.372004 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.372024 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.372039 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.394306 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.394373 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.394313 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.394396 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:24 crc kubenswrapper[4647]: E1128 15:25:24.394570 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:24 crc kubenswrapper[4647]: E1128 15:25:24.394721 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:24 crc kubenswrapper[4647]: E1128 15:25:24.394798 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:24 crc kubenswrapper[4647]: E1128 15:25:24.394863 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.475402 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.475506 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.475526 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.475554 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.475572 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.580956 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.581017 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.581033 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.581059 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.581079 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.684717 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.684785 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.684797 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.684819 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.684833 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.788646 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.788692 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.788701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.788719 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.788730 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.891549 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.891616 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.891637 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.891665 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.891683 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.994776 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.994843 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.994862 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.994889 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:24 crc kubenswrapper[4647]: I1128 15:25:24.994908 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:24Z","lastTransitionTime":"2025-11-28T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.097324 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.097470 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.097492 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.097530 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.097549 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.200383 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.200527 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.200548 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.200574 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.200587 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.303929 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.303983 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.304003 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.304026 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.304046 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.408120 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.408194 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.408211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.408235 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.408252 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.512102 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.512173 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.512193 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.512219 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.512241 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.615755 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.615799 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.615810 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.615826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.615838 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.718440 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.718493 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.718505 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.718525 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.718538 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.821157 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.821209 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.821226 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.821248 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.821264 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.923783 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.923851 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.923874 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.923904 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:25 crc kubenswrapper[4647]: I1128 15:25:25.923926 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:25Z","lastTransitionTime":"2025-11-28T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.028150 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.028216 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.028233 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.028259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.028280 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.130225 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.130259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.130267 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.130281 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.130289 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.233779 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.233844 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.233854 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.233872 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.233882 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.337655 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.337713 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.337727 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.337750 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.337764 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.394108 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.394233 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:26 crc kubenswrapper[4647]: E1128 15:25:26.394282 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.394114 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.394138 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:26 crc kubenswrapper[4647]: E1128 15:25:26.394403 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:26 crc kubenswrapper[4647]: E1128 15:25:26.394599 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:26 crc kubenswrapper[4647]: E1128 15:25:26.394661 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.440515 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.440553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.440562 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.440575 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.440585 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.542436 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.542476 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.542486 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.542500 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.542512 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.645869 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.645951 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.646243 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.646281 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.646294 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.749340 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.749395 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.749452 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.749477 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.749494 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.852132 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.852176 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.852191 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.852211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.852224 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.963934 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.963975 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.963985 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.963999 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:26 crc kubenswrapper[4647]: I1128 15:25:26.964009 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:26Z","lastTransitionTime":"2025-11-28T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.066598 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.066641 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.066650 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.066667 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.066678 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.169295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.169345 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.169357 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.169373 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.169385 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.272522 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.272570 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.272581 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.272599 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.272611 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.294521 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.294567 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.294578 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.294596 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.294607 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: E1128 15:25:27.306561 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.310273 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.310460 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.310524 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.310602 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.310664 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: E1128 15:25:27.325606 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.329328 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.329384 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.329394 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.329432 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.329443 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:27 crc kubenswrapper[4647]: E1128 15:25:27.341296 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:27Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.345730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.345771 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.345784 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.345802 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.345814 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.364127 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.364171 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.364183 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.364199 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.364211 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: E1128 15:25:27.376892 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.378812 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.378843 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.378855 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.378872 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.378886 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.481429 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.481476 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.481491 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.481511 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.481522 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.584266 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.584647 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.584752 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.584844 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.584940 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.687909 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.688249 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.688368 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.688516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.688599 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.791971 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.793070 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.793174 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.793259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.793340 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.895826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.895882 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.895896 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.895919 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.895930 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.999012 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.999362 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.999513 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.999796 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:27 crc kubenswrapper[4647]: I1128 15:25:27.999868 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:27Z","lastTransitionTime":"2025-11-28T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.102175 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.102246 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.102263 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.102283 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.102296 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.205881 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.205946 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.205964 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.205991 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.206019 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.308708 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.308818 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.308847 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.308881 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.308907 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.393713 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.393775 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.393803 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.393826 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:28 crc kubenswrapper[4647]: E1128 15:25:28.394682 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:28 crc kubenswrapper[4647]: E1128 15:25:28.394797 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:28 crc kubenswrapper[4647]: E1128 15:25:28.395043 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:28 crc kubenswrapper[4647]: E1128 15:25:28.395478 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.411904 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.411961 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.411981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.412004 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.412023 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.514743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.514811 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.514830 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.514857 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.514881 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.618073 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.618134 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.618152 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.618179 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.618232 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.720977 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.721010 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.721020 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.721033 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.721044 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.823329 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.823363 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.823371 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.823386 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.823397 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.926209 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.926251 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.926265 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.926283 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:28 crc kubenswrapper[4647]: I1128 15:25:28.926297 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:28Z","lastTransitionTime":"2025-11-28T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.029573 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.029622 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.029634 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.029650 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.029660 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.132146 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.132220 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.132237 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.132256 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.132269 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.235441 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.235509 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.235527 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.235553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.235570 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.338600 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.338642 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.338653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.338669 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.338682 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.441245 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.441306 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.441317 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.441333 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.441348 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.544391 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.544554 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.544576 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.544601 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.544618 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.648149 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.648210 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.648228 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.648255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.648275 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.751305 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.751375 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.751392 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.751456 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.751474 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.854815 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.854883 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.854904 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.854935 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.854961 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.957570 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.957605 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.957614 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.957627 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:29 crc kubenswrapper[4647]: I1128 15:25:29.957637 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:29Z","lastTransitionTime":"2025-11-28T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.060301 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.060354 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.060364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.060380 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.060390 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.163213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.163543 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.163625 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.163701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.163777 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.266657 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.266943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.267033 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.267123 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.267215 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.369668 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.369736 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.369755 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.369782 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.369799 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.393693 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:30 crc kubenswrapper[4647]: E1128 15:25:30.393894 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.394159 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.394203 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:30 crc kubenswrapper[4647]: E1128 15:25:30.394280 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.394635 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:30 crc kubenswrapper[4647]: E1128 15:25:30.394726 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:30 crc kubenswrapper[4647]: E1128 15:25:30.394820 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.422826 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.439226 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.454539 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.472473 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.472589 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.472610 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.472681 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.472701 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.477470 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.506933 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e
93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.528855 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.551552 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.565459 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577380 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577480 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577494 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577538 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577551 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.577979 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.595328 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.613203 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.627651 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.642385 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.663657 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.681950 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.682010 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.682021 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.682040 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.682053 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.686998 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.706765 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.726114 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:30Z is after 
2025-08-24T17:21:41Z" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.783813 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.783866 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.783878 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.783897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.783909 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.886347 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.886487 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.886512 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.886544 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.886565 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.989487 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.989522 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.989546 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.989566 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:30 crc kubenswrapper[4647]: I1128 15:25:30.989576 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:30Z","lastTransitionTime":"2025-11-28T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.091869 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.092246 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.092255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.092272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.092286 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.196033 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.196087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.196100 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.196120 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.196137 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.299446 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.299511 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.299531 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.299556 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.299573 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.402152 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.402211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.402231 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.402253 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.402272 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.511895 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.511956 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.511969 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.511990 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.512002 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.615645 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.615726 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.615748 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.615781 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.615802 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.718855 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.718913 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.718926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.718947 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.718961 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.821229 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.821282 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.821299 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.821319 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.821331 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.924311 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.924355 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.924366 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.924385 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:31 crc kubenswrapper[4647]: I1128 15:25:31.924396 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:31Z","lastTransitionTime":"2025-11-28T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.027693 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.027767 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.027790 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.027817 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.027836 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:32Z","lastTransitionTime":"2025-11-28T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.130488 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.130564 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.130585 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.130614 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.130636 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:32Z","lastTransitionTime":"2025-11-28T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.232978 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.233257 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.233364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.233465 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.233540 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:32Z","lastTransitionTime":"2025-11-28T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.394092 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.394159 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.394171 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:32 crc kubenswrapper[4647]: E1128 15:25:32.394274 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:32 crc kubenswrapper[4647]: I1128 15:25:32.394322 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:32 crc kubenswrapper[4647]: E1128 15:25:32.394500 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:32 crc kubenswrapper[4647]: E1128 15:25:32.394648 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:32 crc kubenswrapper[4647]: E1128 15:25:32.394832 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:34 crc kubenswrapper[4647]: I1128 15:25:34.393526 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:34 crc kubenswrapper[4647]: I1128 15:25:34.393567 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:34 crc kubenswrapper[4647]: I1128 15:25:34.393599 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:34 crc kubenswrapper[4647]: E1128 15:25:34.393659 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:34 crc kubenswrapper[4647]: I1128 15:25:34.393697 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:34 crc kubenswrapper[4647]: E1128 15:25:34.393792 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:34 crc kubenswrapper[4647]: E1128 15:25:34.393881 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:34 crc kubenswrapper[4647]: E1128 15:25:34.393963 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.394401 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.394471 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.394501 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.394522 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:36 crc kubenswrapper[4647]: E1128 15:25:36.395017 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:36 crc kubenswrapper[4647]: E1128 15:25:36.395183 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:36 crc kubenswrapper[4647]: E1128 15:25:36.395319 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:36 crc kubenswrapper[4647]: E1128 15:25:36.395463 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.396981 4647 scope.go:117] "RemoveContainer" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45"
Nov 28 15:25:36 crc kubenswrapper[4647]: E1128 15:25:36.397500 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df"
Has your network provider started?"} Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.669962 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.670025 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.670036 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.670069 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.670082 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:36Z","lastTransitionTime":"2025-11-28T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.772365 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.772451 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.772465 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.772485 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.772499 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:36Z","lastTransitionTime":"2025-11-28T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.875679 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.875729 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.875758 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.875777 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.875788 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:36Z","lastTransitionTime":"2025-11-28T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.978846 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.978907 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.978921 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.978940 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:36 crc kubenswrapper[4647]: I1128 15:25:36.978952 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:36Z","lastTransitionTime":"2025-11-28T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.082015 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.082058 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.082068 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.082083 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.082095 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.185863 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.185899 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.185908 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.185922 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.185935 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.289091 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.289138 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.289148 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.289169 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.289179 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.392073 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.392133 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.392147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.392165 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.392177 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.411925 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.411980 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.411997 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.412020 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.412038 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.432885 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:37Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.437334 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.437371 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.437404 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.437440 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.437453 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.454280 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:37Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.459450 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.459489 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.459502 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.459516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.459525 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.476676 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:37Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.481520 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.481582 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.481601 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.481626 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.481646 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.500199 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:37Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.504970 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.505012 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.505029 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.505053 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.505070 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.519567 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:37Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:37 crc kubenswrapper[4647]: E1128 15:25:37.519683 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.521693 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.521757 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.521769 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.521786 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.521796 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.624096 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.624153 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.624166 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.624207 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.624220 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.726628 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.726683 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.726701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.726726 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.726747 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.834493 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.834540 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.834561 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.834579 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.834595 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.937377 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.937454 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.937469 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.937487 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:37 crc kubenswrapper[4647]: I1128 15:25:37.937498 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:37Z","lastTransitionTime":"2025-11-28T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.040043 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.040094 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.040109 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.040130 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.040143 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.144495 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.144553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.144573 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.144600 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.144617 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.248021 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.248077 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.248106 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.248123 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.248154 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.351188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.351243 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.351254 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.351271 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.351283 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.393552 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.393654 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.393713 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.393851 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.394063 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.394138 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.394302 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.394402 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.454343 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.454377 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.454386 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.454400 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.454440 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.556784 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.556828 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.556836 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.556853 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.556864 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.659243 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.659276 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.659284 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.659300 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.659312 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.737493 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.737669 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 15:25:38 crc kubenswrapper[4647]: E1128 15:25:38.737753 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:26:10.737735721 +0000 UTC m=+100.585342142 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.761336 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.761371 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.761382 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.761400 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.761439 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.863981 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.864065 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.864084 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.864111 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.864137 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.966619 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.966651 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.966660 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.966675 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:38 crc kubenswrapper[4647]: I1128 15:25:38.966684 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:38Z","lastTransitionTime":"2025-11-28T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.069312 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.069343 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.069351 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.069365 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.069376 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.172168 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.172230 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.172240 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.172255 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.172267 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.275387 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.275461 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.275475 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.275497 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.275513 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.378153 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.378189 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.378199 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.378216 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.378229 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.480421 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.480463 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.480473 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.480490 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.480500 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.582756 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.584766 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.584797 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.584823 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.584836 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.688177 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.688224 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.688236 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.688254 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.688263 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.791568 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.791730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.791754 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.791827 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.791860 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.894488 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.894533 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.894544 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.894561 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.894574 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.997846 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.997901 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.997912 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.997930 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:39 crc kubenswrapper[4647]: I1128 15:25:39.997947 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:39Z","lastTransitionTime":"2025-11-28T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.101175 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.101229 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.101246 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.101269 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.101291 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.204017 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.204078 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.204089 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.204106 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.204116 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.313220 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.313274 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.313287 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.313306 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.313323 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.393484 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.393572 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.393484 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:40 crc kubenswrapper[4647]: E1128 15:25:40.393660 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.393692 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:40 crc kubenswrapper[4647]: E1128 15:25:40.393777 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:40 crc kubenswrapper[4647]: E1128 15:25:40.393845 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:40 crc kubenswrapper[4647]: E1128 15:25:40.393905 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.418396 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.418458 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.418469 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.418490 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.418503 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.420729 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.436406 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.450257 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.462366 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.473671 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.482364 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.494867 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.507363 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521402 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521459 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521473 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521494 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521505 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.521856 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.536076 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.553908 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.567804 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.580920 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.592147 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.603595 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.618609 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.624636 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.624748 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.624762 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.624817 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.624832 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.640786 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e
93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.728206 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.728295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.728311 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.728354 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.728369 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.830729 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.830776 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.830786 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.830804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.830816 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.844181 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/0.log" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.844239 4647 generic.go:334] "Generic (PLEG): container finished" podID="8fe12df9-7deb-4f76-91cf-5b6b138d7675" containerID="c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9" exitCode=1 Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.844279 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerDied","Data":"c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.844906 4647 scope.go:117] "RemoveContainer" containerID="c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.867516 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.894269 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.914849 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.937923 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.938610 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.938642 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.938652 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.938672 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.938686 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:40Z","lastTransitionTime":"2025-11-28T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.958247 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.976372 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:40 crc kubenswrapper[4647]: I1128 15:25:40.995173 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.010707 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.026350 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.041840 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.041897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.041912 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.041935 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.041949 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.043559 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.054002 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.066185 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.076930 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.088384 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.101448 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.116955 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.131587 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.144616 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.144658 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.144674 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.144696 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.144711 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.247381 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.247439 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.247449 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.247467 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.247482 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.361364 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.361480 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.361515 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.361548 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.361592 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.464437 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.464484 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.464497 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.464516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.464532 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.567994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.568050 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.568069 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.568096 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.568116 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.670726 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.670764 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.670773 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.670789 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.670800 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.773831 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.773894 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.773906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.773926 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.773943 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.850504 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/0.log" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.850588 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerStarted","Data":"8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.865015 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.875997 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.876057 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.876070 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.876088 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.876101 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.886686 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.899480 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.913892 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.928652 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.943729 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.961136 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.974157 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.979829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.979868 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.979887 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.979906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 
15:25:41.979918 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:41Z","lastTransitionTime":"2025-11-28T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:41 crc kubenswrapper[4647]: I1128 15:25:41.989789 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.004172 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.015380 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.029966 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.047172 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.059923 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.072403 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.082722 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.082754 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.082764 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.082780 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.082791 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.084669 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.096993 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.185448 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.185761 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.185861 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.185973 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.186060 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.289223 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.289265 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.289276 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.289292 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.289305 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.392581 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.392639 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.392652 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.392675 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.392689 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.393451 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.393489 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.393463 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:42 crc kubenswrapper[4647]: E1128 15:25:42.393585 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.393500 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:42 crc kubenswrapper[4647]: E1128 15:25:42.393724 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:42 crc kubenswrapper[4647]: E1128 15:25:42.393742 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:42 crc kubenswrapper[4647]: E1128 15:25:42.393795 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.495389 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.495464 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.495477 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.495495 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.495508 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.599038 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.599497 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.599763 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.599960 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.600094 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.703808 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.704253 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.704399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.704607 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.704754 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.808399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.808501 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.808518 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.808539 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.808552 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.911496 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.911547 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.911560 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.911578 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:42 crc kubenswrapper[4647]: I1128 15:25:42.911592 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:42Z","lastTransitionTime":"2025-11-28T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.015590 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.015692 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.015711 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.015731 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.015746 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.118365 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.118429 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.118446 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.118476 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.118491 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.221355 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.221394 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.221405 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.221436 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.221446 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.323577 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.323623 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.323635 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.323659 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.323675 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.427312 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.427383 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.427402 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.427466 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.427487 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.530465 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.530534 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.530556 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.530581 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.530601 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.633029 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.633082 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.633091 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.633109 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.633119 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.735503 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.735558 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.735574 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.735596 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.735611 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.837826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.837875 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.837884 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.837902 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.837913 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.940963 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.941200 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.941212 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.941228 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:43 crc kubenswrapper[4647]: I1128 15:25:43.941240 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:43Z","lastTransitionTime":"2025-11-28T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.044535 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.044603 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.044615 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.044630 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.044641 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.148116 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.148178 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.148202 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.148232 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.148255 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.250877 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.250928 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.250943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.250965 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.250980 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.353395 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.353550 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.353570 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.353599 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.353624 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.394763 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:44 crc kubenswrapper[4647]: E1128 15:25:44.394959 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.395336 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:44 crc kubenswrapper[4647]: E1128 15:25:44.395520 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.395762 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:44 crc kubenswrapper[4647]: E1128 15:25:44.395864 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.396181 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:44 crc kubenswrapper[4647]: E1128 15:25:44.396296 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.456722 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.456768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.456797 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.456812 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.456823 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.560342 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.560457 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.560475 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.560497 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.560546 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.663210 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.663261 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.663279 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.663304 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.663323 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.766558 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.766641 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.766662 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.766695 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.766719 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.869154 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.869287 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.869305 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.869332 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.869349 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.972736 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.972798 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.972817 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.972846 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:44 crc kubenswrapper[4647]: I1128 15:25:44.972864 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:44Z","lastTransitionTime":"2025-11-28T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.076613 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.077528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.077631 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.077669 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.077688 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.181847 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.181914 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.181937 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.181970 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.181992 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.286372 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.286478 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.286502 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.286528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.286548 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.390067 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.390171 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.390191 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.390225 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.390249 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.494350 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.494565 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.494586 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.494610 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.494626 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.598014 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.598111 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.598358 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.598517 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.598539 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.701653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.701745 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.701766 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.701797 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.701824 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.804957 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.805015 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.805032 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.805060 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.805078 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.907994 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.908095 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.908113 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.908147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:45 crc kubenswrapper[4647]: I1128 15:25:45.908166 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:45Z","lastTransitionTime":"2025-11-28T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.011740 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.011836 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.011849 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.011863 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.011874 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.114824 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.114871 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.114879 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.114897 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.114913 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.218090 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.218234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.218257 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.218278 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.218289 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.321856 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.321933 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.321953 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.321985 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.322013 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.394228 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.394283 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.394354 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.394402 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:46 crc kubenswrapper[4647]: E1128 15:25:46.394601 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:46 crc kubenswrapper[4647]: E1128 15:25:46.394780 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:46 crc kubenswrapper[4647]: E1128 15:25:46.394909 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:46 crc kubenswrapper[4647]: E1128 15:25:46.395277 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.424581 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.424632 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.424643 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.424661 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.424675 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.528590 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.528657 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.528676 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.528703 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.528720 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.631656 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.631701 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.631714 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.631732 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.631745 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.736092 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.736213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.736236 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.736389 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.736473 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.840647 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.840715 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.840738 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.840767 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.840788 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.943856 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.943931 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.943960 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.943990 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:46 crc kubenswrapper[4647]: I1128 15:25:46.944015 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:46Z","lastTransitionTime":"2025-11-28T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.047571 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.047624 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.047645 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.047670 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.047689 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.150999 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.151071 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.151091 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.151123 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.151144 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.254979 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.255097 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.255114 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.255134 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.255145 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.358764 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.358840 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.358861 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.358893 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.358913 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.394895 4647 scope.go:117] "RemoveContainer" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.467739 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.467804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.467826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.467851 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.467868 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.571649 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.571751 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.571776 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.572361 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.572779 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.676191 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.676271 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.676295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.676326 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.676345 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.742635 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.742680 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.742698 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.742721 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.742739 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.766316 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.773090 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.773146 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.773165 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.773193 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.773216 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.795360 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.803547 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.803608 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.803628 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.803666 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.803689 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.829647 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.837053 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.837116 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.837136 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.837163 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.837183 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.857036 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.864532 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.864618 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.864639 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.864666 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.864687 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.887398 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aecdc603-c25f-4d6e-8a41-c75b9586cce4\\\",\\\"systemUUID\\\":\\\"a49754f3-09f0-421c-a39e-92fe09c4d7bb\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:47 crc kubenswrapper[4647]: E1128 15:25:47.887589 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.889606 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.889646 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.889663 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.889686 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.889704 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.994137 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.994221 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.994236 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.994269 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:47 crc kubenswrapper[4647]: I1128 15:25:47.994291 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:47Z","lastTransitionTime":"2025-11-28T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.099517 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.099579 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.099590 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.099611 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.099624 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.202460 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.202528 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.202553 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.202581 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.202599 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.305791 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.305865 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.305882 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.305909 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.305926 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.393559 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.393609 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.393655 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.393745 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:48 crc kubenswrapper[4647]: E1128 15:25:48.393903 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:48 crc kubenswrapper[4647]: E1128 15:25:48.394097 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:48 crc kubenswrapper[4647]: E1128 15:25:48.394274 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:48 crc kubenswrapper[4647]: E1128 15:25:48.394379 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.408716 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.408759 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.408772 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.408792 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.408805 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.511606 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.511654 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.511667 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.511687 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.511701 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.614779 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.614838 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.614849 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.614868 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.614879 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.718193 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.718251 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.718264 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.718284 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.718304 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.821526 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.821563 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.821573 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.821588 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.821600 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.879539 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/2.log" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.883738 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.884613 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.901995 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.915855 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 
15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.924050 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.924087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.924116 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.924134 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.924145 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:48Z","lastTransitionTime":"2025-11-28T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.932460 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.950313 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.968655 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:48 crc kubenswrapper[4647]: I1128 15:25:48.984665 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.002193 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.022126 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.029782 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.029836 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.029854 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.029881 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.029899 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.040243 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.062237 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.085159 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.100900 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.118071 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.162148 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.162220 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.162242 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.162274 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.162297 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.177834 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.201547 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b2
2d6b9eac18d5c7ed86836a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: 
certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.227818 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.249925 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.265974 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.266048 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.266072 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.266120 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.266146 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.369996 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.370043 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.370063 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.370087 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.370105 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.474136 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.474193 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.474213 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.474239 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.474259 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.577955 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.578305 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.578399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.578538 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.578629 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.682317 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.682906 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.682998 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.683104 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.683202 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.786722 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.786803 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.786830 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.786861 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.786886 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.889563 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.889616 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.889628 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.889648 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.889664 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.891392 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/3.log" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.892459 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/2.log" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.896729 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" exitCode=1 Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.896787 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.896849 4647 scope.go:117] "RemoveContainer" containerID="8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.898204 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:25:49 crc kubenswrapper[4647]: E1128 15:25:49.898654 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.926179 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.944114 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.966756 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.987301 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.993476 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.993525 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.993538 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.993561 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:49 crc kubenswrapper[4647]: I1128 15:25:49.993575 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:49Z","lastTransitionTime":"2025-11-28T15:25:49Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.005069 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.023382 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.049720 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.085115 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:49Z\\\",\\\"message\\\":\\\"der:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] 
Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876375 6515 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:25:48.876327 6515 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876487 6515 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:25:48.876658 6515 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.097055 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.097112 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.097132 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.097160 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.097184 4647 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.108618 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e635
5e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.130258 4647 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.153898 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.170821 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.190764 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.200769 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.200829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.200845 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.200866 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.200880 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.211106 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.227465 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.245787 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.264241 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.304329 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.304397 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.304506 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.304534 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.304555 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.394211 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.394356 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:50 crc kubenswrapper[4647]: E1128 15:25:50.394474 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.394526 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.395049 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:50 crc kubenswrapper[4647]: E1128 15:25:50.395227 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:50 crc kubenswrapper[4647]: E1128 15:25:50.395891 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:50 crc kubenswrapper[4647]: E1128 15:25:50.395748 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.408834 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.409223 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.409368 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.409576 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.409732 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.422539 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.425062 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.447130 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.464224 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.483704 4647 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.505335 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.520641 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.520702 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.520722 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.520763 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.520784 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.527024 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.546080 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.567455 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.591031 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.613479 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.624437 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.624506 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.624524 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.624549 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.624569 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.646805 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8700aa0d466a48a50356f3287b31ecc6bd5c962e93b9ede0f966ac3edc24cf45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:21Z\\\",\\\"message\\\":\\\"0.0.1:29103\\\\\\\"\\\\nI1128 15:25:21.276286 6179 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276293 6179 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-k7msq\\\\nI1128 15:25:21.276321 6179 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-k7msq in node crc\\\\nI1128 15:25:21.276333 6179 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-k7msq after 0 failed attempt(s)\\\\nI1128 15:25:21.276340 6179 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-k7msq\\\\nF1128 15:25:21.276340 6179 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: 
certificate\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:49Z\\\",\\\"message\\\":\\\"der:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876375 6515 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:25:48.876327 6515 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876487 6515 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:25:48.876658 6515 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.667616 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-c
luster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":fals
e,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.689479 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.710045 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.729202 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.729249 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.729287 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.729307 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.729320 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.730069 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.751044 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.776397 4647 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.832208 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.832282 4647 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.832302 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.832329 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.832350 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.904646 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/3.log" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.911239 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:25:50 crc kubenswrapper[4647]: E1128 15:25:50.911568 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.931293 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a79b0b39-cffb-4ac3-a526-837c6aa70616\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-78srd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:06Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-cz6sq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.937214 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.937304 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.937325 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.937370 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.937386 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:50Z","lastTransitionTime":"2025-11-28T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.953310 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b86f7a5e5a64164d672f68fd9a0c8b39915d8ca2290685d591043e7ff660ecff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.973483 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81094fb3-a4ad-4cd5-92a9-6a7643e94e6f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9bd30ed6f3c6a1bf6e6a6cb9187da71c57b2b82749edf29ef58a5838bfe5d1d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8a8135ecdedd6dfd3640007acbeff89db4847c000555ff062fc72e00ce8eca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e4bf69a2290d5d0f23eeeeb816f61946be0196a16a98c79f415c57bfd3f6200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://35764e1e575fc640e9082f09a2b776a9228bbfe9b046dddadcc9f33230e3e803\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:50 crc kubenswrapper[4647]: I1128 15:25:50.988130 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1fd35925aa8903cd9ea200016fe66e70a96fd75f4bb92874e9f94ed29556b3e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:50Z is after 
2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.002474 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4mdqn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe12df9-7deb-4f76-91cf-5b6b138d7675\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:40Z\\\",\\\"message\\\":\\\"2025-11-28T15:24:53+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee\\\\n2025-11-28T15:24:53+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5c498cdb-3cf2-4fbc-a2cc-6ecf95c11eee to /host/opt/cni/bin/\\\\n2025-11-28T15:24:54Z [verbose] multus-daemon started\\\\n2025-11-28T15:24:54Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:25:39Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wpkrt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4mdqn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.023612 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4123334d-7721-4312-b141-97316a1cb2aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0407d283967587af953a435a1ff4457ddd3bad2218d35c1bf30be66c1f4e720\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96e782d85882fd17213d327df3b531bdf4a96210b4e0a0ae9a2392ca66622825\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce4a8621889ead81435693a6600df8f759b494a8fc38931c45f645b754b24549\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.040348 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.040443 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.040471 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.040498 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.040520 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.048095 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.067864 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.087734 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-k7msq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02426497-e7d0-4982-a129-e5715ad55cd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b30bb932f4b49d1692cc5e65fc7588ffa7fdcc072b57bb6b16b1fdaff3757487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rsvwn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-k7msq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.104779 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"008f163b-b2fe-4238-90b5-96f0d89f3fb5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af7c8cc607609954508a2c8513d11e521dabdd23bd267750e42336785f95038d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qvh5c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\"
:\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7mwt4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.124562 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"817b3066-f5d4-49c6-a9b0-f621261d5f81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ec97e9b252295a5c622219892912172f44ad5b5489c66285064c3ce89c426e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a54f1b6fb56c024de4591396b3fea0e721aaf35014fa9ee2dcdcdf9d82dff9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1eee4588caec5600bf3a3d30968bda872649b30a7dfa3eb287eba31b06ddb7ee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://689e22ca0f67089cb8cfc94d0f7095ee5143551655f144a5ad62f125dd45bec9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://07876a231613e1085b3d9ccdc3625df14699690587483a79134b39de1ed79330\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2b39462a9d35b5d00e7c34fc8843a944494e400ab1ef113d408441b6aaaca39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9ff83ddbaa3719044b9c64f5b0fb4689cba344f8607680aeac9f015ecef496b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:25:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q8229\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-l5xdk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.143023 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.143055 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.143067 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.143084 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.143095 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.150993 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de25f5ba-91da-4a77-8747-ec3a56a141df\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b2
2d6b9eac18d5c7ed86836a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:25:49Z\\\",\\\"message\\\":\\\"der:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876375 6515 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:25:48.876327 6515 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:25:48.876487 6515 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:25:48.876658 6515 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:25:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whrxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-c76pb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.172252 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b508f6cb-c167-4c71-8b7d-a6014ca9fafe\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.193263 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.220105 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75e23a343c85499a5d455e54239acb6bfee3fb1976460b30f5ae5874ea861ebf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://90693da9a91419567a2b1f4dc48f1bae38d332c1eceb961e58238c672bdb8734\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.236193 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dg57z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0bf7a521-f9a3-4e6b-a455-337a6a6db730\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bfb0f60fbefcda09a1a2c4acb0734d91e42753f65a4ab62e368c4f681526711c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xpfgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dg57z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.246509 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.247355 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.247538 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.247697 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.247845 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.249258 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3c61497d-3cda-498b-94fc-372dc6030924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:25:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2960c7634db75d3b827fdcb44e4a545b7b95df01398290a0ac8ddfbc7d3955c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cc31203ffe038b1436798503e261b5bb897730e418f7adede382205ee12b73f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2a
f0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:25:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxt6r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:25:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-55cn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.282854 4647 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d61f1fbd-864f-4db7-8395-6f2119c5f8b6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:24:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f6d76d68cb88053d117ce151dcff87171ad0b1757334e4e69986ac81301e2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6df182c2cf4ed05c0b48e48ac514987a133ab4252b0699d6f25dd3c81992b13a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafb3cd99986cf0e02dcfe091393dcfa21e001d82665695bab283e7d1925651d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a17ce56c81f992b4217a104bca435e5aed254f83fe91c74cf838ee9057c4503c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eeae41e209c03a7b7154c064cd6238a3ae4f4f9b91204f355bab8cb49c322793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:24:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81caabfdaec82723b8ce4ee38f19077b584906e5cbb590d3bbaeee44d77dd054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81caabfdaec82723b8ce4ee38f19077b584906e5cbb590d3bbaeee44d77dd054\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ddf40314e95ad5216f75c3bc1df4a668417c48792aece0c27a003e8347576cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ddf40314e95ad5216f75c3bc1df4a668417c48792aece0c27a003e8347576cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b148d7a9a39458095947e6d65d770b19243421de6e9383095ded1703018b069a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b148d7a9a39458095947e6d65d770b19243421de6e9383095ded1703018b069a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:24:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:24:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:24:30Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:25:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.350258 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.350295 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.350303 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.350319 4647 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.350329 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.453383 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.453468 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.453482 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.453507 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.453522 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.556353 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.556426 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.556440 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.556460 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.556473 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.660089 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.660175 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.660194 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.660226 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.660249 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.763118 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.763158 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.763170 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.763188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.763201 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.866941 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.867003 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.867024 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.867048 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.867066 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.970706 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.970804 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.970829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.970861 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:51 crc kubenswrapper[4647]: I1128 15:25:51.970885 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:51Z","lastTransitionTime":"2025-11-28T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.074575 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.074666 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.074698 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.074735 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.074762 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.178894 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.179271 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.179338 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.179406 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.179511 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.283653 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.283743 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.283922 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.283959 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.283979 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.387918 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.387972 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.387983 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.388002 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.388013 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.394468 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.394511 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.394544 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:25:52 crc kubenswrapper[4647]: E1128 15:25:52.394650 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616"
Nov 28 15:25:52 crc kubenswrapper[4647]: E1128 15:25:52.394948 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:25:52 crc kubenswrapper[4647]: E1128 15:25:52.395117 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.395213 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:25:52 crc kubenswrapper[4647]: E1128 15:25:52.395380 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.491842 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.491921 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.491934 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.491955 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.491970 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.595842 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.595915 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.595933 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.595960 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.595981 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.698678 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.698746 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.698765 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.698789 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.698806 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.802301 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.802363 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.802380 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.802404 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.802437 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.905076 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.905125 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.905134 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.905154 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:52 crc kubenswrapper[4647]: I1128 15:25:52.905167 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:52Z","lastTransitionTime":"2025-11-28T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.007828 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.007879 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.007920 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.007943 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.007954 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.111676 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.111747 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.111767 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.111791 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.111811 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.214214 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.214258 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.214272 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.214293 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.214305 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.317117 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.317728 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.317747 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.317775 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.317799 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.420990 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.421053 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.421072 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.421098 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.421116 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.524557 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.524609 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.524627 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.524651 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.524668 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.628329 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.628391 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.628408 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.628471 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.628489 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.732039 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.732096 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.732118 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.732147 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.732167 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.835076 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.835141 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.835156 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.835178 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.835192 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.937912 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.937959 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.937970 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.937987 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:53 crc kubenswrapper[4647]: I1128 15:25:53.938000 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:53Z","lastTransitionTime":"2025-11-28T15:25:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.041654 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.041762 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.041816 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.041851 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.041872 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.146886 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.146996 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.147013 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.147032 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.147042 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.249950 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.250004 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.250016 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.250037 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.250055 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.353516 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.353578 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.353588 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.353607 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.353621 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.393835 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.393919 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.393917 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.394004 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.394188 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.394296 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.394417 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.394567 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.456977 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.457038 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.457055 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.457082 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.457101 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.526585 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.526814 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.526910 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.526859375 +0000 UTC m=+148.374465836 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.526987 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527009 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527023 4647 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.527011 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527082 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.527064101 +0000 UTC m=+148.374670532 (durationBeforeRetry 1m4s). 
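The UnmountVolume failure that follows is a registration race: the hostpath-provisioner CSI plugin has not yet re-registered with the kubelet after the restart, so no CSI client can be constructed for it and the teardown is requeued. A minimal sketch of the driver lookup, with the registry modeled as a plain map (an assumption; the real registry tracks plugin sockets and versions):

    package main

    import "fmt"

    // csiRegistry maps a driver name to its registration socket path.
    type csiRegistry map[string]string

    func (r csiRegistry) client(driver string) (string, error) {
    	sock, ok := r[driver]
    	if !ok {
    		return "", fmt.Errorf(
    			"driver name %s not found in the list of registered CSI drivers", driver)
    	}
    	return sock, nil
    }

    func main() {
    	reg := csiRegistry{} // plugin has not registered over its socket yet
    	if _, err := reg.client("kubevirt.io.hostpath-provisioner"); err != nil {
    		fmt.Println("Unmounter.TearDownAt failed to get CSI client:", err)
    	}
    }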
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.527110 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527224 4647 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527294 4647 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527309 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.527293607 +0000 UTC m=+148.374900088 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527355 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.527336719 +0000 UTC m=+148.374943180 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.527534 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527685 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527703 4647 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527713 4647 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:54 crc kubenswrapper[4647]: E1128 15:25:54.527759 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.52774619 +0000 UTC m=+148.375352631 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.559924 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.559988 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.560004 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.560032 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.560052 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.664159 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.664261 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.664289 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.664324 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.664351 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.769319 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.769382 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.769399 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.769445 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.769466 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.872736 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.872792 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.872805 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.872826 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.872837 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.976362 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.976484 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.976510 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.976540 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:54 crc kubenswrapper[4647]: I1128 15:25:54.976560 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:54Z","lastTransitionTime":"2025-11-28T15:25:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.079344 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.079456 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.079478 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.079510 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.079534 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.183188 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.183264 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.183290 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.183321 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.183347 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.286960 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.287033 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.287062 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.287095 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.287113 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.390140 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.390198 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.390211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.390234 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.390248 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.493690 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.493775 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.493794 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.493828 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.493847 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.597756 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.597829 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.597849 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.597877 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.597897 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.702048 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.702128 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.702149 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.702180 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.702206 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.806785 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.806893 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.806947 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.806976 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.807032 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.911940 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.911992 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.912005 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.912028 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:55 crc kubenswrapper[4647]: I1128 15:25:55.912044 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:55Z","lastTransitionTime":"2025-11-28T15:25:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.015106 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.015150 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.015167 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.015183 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.015214 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.118180 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.118265 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.118285 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.118339 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.118356 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.221050 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.221116 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.221127 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.221148 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.221163 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.325498 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.325630 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.325709 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.325784 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.325817 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.394040 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.394092 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:56 crc kubenswrapper[4647]: E1128 15:25:56.394709 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.395196 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:56 crc kubenswrapper[4647]: E1128 15:25:56.395235 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:56 crc kubenswrapper[4647]: E1128 15:25:56.395594 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.395946 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:56 crc kubenswrapper[4647]: E1128 15:25:56.396350 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.412997 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.430585 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.430862 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.431341 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.431689 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.432045 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.535613 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.535930 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.536066 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.536169 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.536257 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.639833 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.639891 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.639900 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.639917 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.639927 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.743186 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.743222 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.743230 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.743244 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.743257 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.846643 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.846689 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.846705 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.846729 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.846746 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.950010 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.950105 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.950202 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.950279 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:56 crc kubenswrapper[4647]: I1128 15:25:56.950304 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:56Z","lastTransitionTime":"2025-11-28T15:25:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.054300 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.054368 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.054391 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.054476 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.054505 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.158599 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.158660 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.158680 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.158709 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.158734 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.262636 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.262707 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.262730 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.262761 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.262830 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.365660 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.365720 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.365739 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.365763 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.365779 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.468446 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.469086 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.469178 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.469259 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.469322 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.571679 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.571735 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.571750 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.571768 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.571782 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.675975 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.676044 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.676073 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.676107 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.676131 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.779744 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.779796 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.779813 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.779837 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.779858 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.882664 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.882717 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.882736 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.882760 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.882775 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.984986 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.985030 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.985039 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.985055 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:57 crc kubenswrapper[4647]: I1128 15:25:57.985066 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:57Z","lastTransitionTime":"2025-11-28T15:25:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.087794 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.087857 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.087867 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.087884 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.087893 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:58Z","lastTransitionTime":"2025-11-28T15:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.190122 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.190189 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.190197 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.190211 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.190221 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:58Z","lastTransitionTime":"2025-11-28T15:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.223912 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.223972 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.223982 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.224006 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.224016 4647 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:25:58Z","lastTransitionTime":"2025-11-28T15:25:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.290062 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529"] Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.290793 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.294700 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.294815 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.295764 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.297717 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.324261 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=61.324233366 podStartE2EDuration="1m1.324233366s" podCreationTimestamp="2025-11-28 15:24:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:58.324156723 +0000 UTC m=+88.171763164" watchObservedRunningTime="2025-11-28 15:25:58.324233366 +0000 UTC m=+88.171839797" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.356496 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=40.356422066 podStartE2EDuration="40.356422066s" podCreationTimestamp="2025-11-28 15:25:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:58.340905792 +0000 UTC m=+88.188512223" watchObservedRunningTime="2025-11-28 15:25:58.356422066 +0000 UTC m=+88.204028497" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.372574 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.372646 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.372673 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.372757 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.372779 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.387850 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-k7msq" podStartSLOduration=67.387818803 podStartE2EDuration="1m7.387818803s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:58.387258248 +0000 UTC m=+88.234864669" watchObservedRunningTime="2025-11-28 15:25:58.387818803 +0000 UTC m=+88.235425234" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.387996 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-4mdqn" podStartSLOduration=67.387988728 podStartE2EDuration="1m7.387988728s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:58.373007009 +0000 UTC m=+88.220613440" watchObservedRunningTime="2025-11-28 15:25:58.387988728 +0000 UTC m=+88.235595179" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.393620 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.393716 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:25:58 crc kubenswrapper[4647]: E1128 15:25:58.394159 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.393793 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:25:58 crc kubenswrapper[4647]: E1128 15:25:58.394286 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.393761 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:25:58 crc kubenswrapper[4647]: E1128 15:25:58.394353 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:25:58 crc kubenswrapper[4647]: E1128 15:25:58.394047 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.473892 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.473958 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.473987 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.474021 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.474041 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.474124 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: 
\"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.475273 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.475724 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.483906 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.502630 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f76aaf6-1612-4c4d-9861-d7b75d3542c5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7n529\" (UID: \"4f76aaf6-1612-4c4d-9861-d7b75d3542c5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.611036 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" Nov 28 15:25:58 crc kubenswrapper[4647]: W1128 15:25:58.630970 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f76aaf6_1612_4c4d_9861_d7b75d3542c5.slice/crio-69afc16cec4daadabd0d42913a8e22d19ee825dfd85ad14a314a92686be91ff4 WatchSource:0}: Error finding container 69afc16cec4daadabd0d42913a8e22d19ee825dfd85ad14a314a92686be91ff4: Status 404 returned error can't find the container with id 69afc16cec4daadabd0d42913a8e22d19ee825dfd85ad14a314a92686be91ff4 Nov 28 15:25:58 crc kubenswrapper[4647]: I1128 15:25:58.938493 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" event={"ID":"4f76aaf6-1612-4c4d-9861-d7b75d3542c5","Type":"ContainerStarted","Data":"69afc16cec4daadabd0d42913a8e22d19ee825dfd85ad14a314a92686be91ff4"} Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.135997 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podStartSLOduration=68.135972891 podStartE2EDuration="1m8.135972891s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:58.406837915 +0000 UTC m=+88.254444346" watchObservedRunningTime="2025-11-28 15:25:59.135972891 +0000 UTC m=+88.983579312" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.178836 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-l5xdk" podStartSLOduration=68.178813139 podStartE2EDuration="1m8.178813139s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.136481175 +0000 UTC m=+88.984087596" watchObservedRunningTime="2025-11-28 15:25:59.178813139 +0000 UTC m=+89.026419560" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.197221 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=69.197196153 podStartE2EDuration="1m9.197196153s" podCreationTimestamp="2025-11-28 15:24:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.196042771 +0000 UTC m=+89.043649202" watchObservedRunningTime="2025-11-28 15:25:59.197196153 +0000 UTC m=+89.044802594" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.228691 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=3.228664233 podStartE2EDuration="3.228664233s" podCreationTimestamp="2025-11-28 15:25:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.21281827 +0000 UTC m=+89.060424691" watchObservedRunningTime="2025-11-28 15:25:59.228664233 +0000 UTC m=+89.076270664" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.358970 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=9.358948885 podStartE2EDuration="9.358948885s" 
podCreationTimestamp="2025-11-28 15:25:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.357833284 +0000 UTC m=+89.205439715" watchObservedRunningTime="2025-11-28 15:25:59.358948885 +0000 UTC m=+89.206555306" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.359792 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-55cn2" podStartSLOduration=67.359786529 podStartE2EDuration="1m7.359786529s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.329305928 +0000 UTC m=+89.176912359" watchObservedRunningTime="2025-11-28 15:25:59.359786529 +0000 UTC m=+89.207392950" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.401839 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-dg57z" podStartSLOduration=68.401814924 podStartE2EDuration="1m8.401814924s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.400908858 +0000 UTC m=+89.248515279" watchObservedRunningTime="2025-11-28 15:25:59.401814924 +0000 UTC m=+89.249421355" Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.943987 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" event={"ID":"4f76aaf6-1612-4c4d-9861-d7b75d3542c5","Type":"ContainerStarted","Data":"b443c192baad41c26f5590ea69bf6073d9c3faa63a44921f05b5be8eb0dd9f14"} Nov 28 15:25:59 crc kubenswrapper[4647]: I1128 15:25:59.962137 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7n529" podStartSLOduration=68.962110589 podStartE2EDuration="1m8.962110589s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:59.960517445 +0000 UTC m=+89.808123896" watchObservedRunningTime="2025-11-28 15:25:59.962110589 +0000 UTC m=+89.809717020" Nov 28 15:26:00 crc kubenswrapper[4647]: I1128 15:26:00.394493 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:00 crc kubenswrapper[4647]: E1128 15:26:00.394635 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:00 crc kubenswrapper[4647]: I1128 15:26:00.394943 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:00 crc kubenswrapper[4647]: E1128 15:26:00.395032 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:00 crc kubenswrapper[4647]: I1128 15:26:00.395222 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:00 crc kubenswrapper[4647]: E1128 15:26:00.395300 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:00 crc kubenswrapper[4647]: I1128 15:26:00.395566 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:00 crc kubenswrapper[4647]: E1128 15:26:00.395658 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:02 crc kubenswrapper[4647]: I1128 15:26:02.393989 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:02 crc kubenswrapper[4647]: I1128 15:26:02.394030 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:02 crc kubenswrapper[4647]: I1128 15:26:02.394295 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:02 crc kubenswrapper[4647]: E1128 15:26:02.394630 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:02 crc kubenswrapper[4647]: E1128 15:26:02.394220 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:02 crc kubenswrapper[4647]: E1128 15:26:02.394808 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:02 crc kubenswrapper[4647]: I1128 15:26:02.394030 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:02 crc kubenswrapper[4647]: E1128 15:26:02.395924 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:04 crc kubenswrapper[4647]: I1128 15:26:04.394178 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:04 crc kubenswrapper[4647]: I1128 15:26:04.394310 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:04 crc kubenswrapper[4647]: E1128 15:26:04.394311 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:04 crc kubenswrapper[4647]: I1128 15:26:04.395950 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:04 crc kubenswrapper[4647]: E1128 15:26:04.396307 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:04 crc kubenswrapper[4647]: I1128 15:26:04.398623 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:04 crc kubenswrapper[4647]: I1128 15:26:04.399672 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:26:04 crc kubenswrapper[4647]: E1128 15:26:04.399914 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:26:04 crc kubenswrapper[4647]: E1128 15:26:04.400194 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:04 crc kubenswrapper[4647]: E1128 15:26:04.400825 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:06 crc kubenswrapper[4647]: I1128 15:26:06.393682 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:06 crc kubenswrapper[4647]: I1128 15:26:06.393776 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:06 crc kubenswrapper[4647]: I1128 15:26:06.393808 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:06 crc kubenswrapper[4647]: E1128 15:26:06.393951 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:06 crc kubenswrapper[4647]: E1128 15:26:06.394078 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:06 crc kubenswrapper[4647]: E1128 15:26:06.394153 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:06 crc kubenswrapper[4647]: I1128 15:26:06.393732 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:06 crc kubenswrapper[4647]: E1128 15:26:06.395528 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:08 crc kubenswrapper[4647]: I1128 15:26:08.393693 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:08 crc kubenswrapper[4647]: I1128 15:26:08.393826 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:08 crc kubenswrapper[4647]: I1128 15:26:08.393895 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:08 crc kubenswrapper[4647]: I1128 15:26:08.393908 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:08 crc kubenswrapper[4647]: E1128 15:26:08.394056 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:08 crc kubenswrapper[4647]: E1128 15:26:08.394273 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:08 crc kubenswrapper[4647]: E1128 15:26:08.394882 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:08 crc kubenswrapper[4647]: E1128 15:26:08.395002 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:10 crc kubenswrapper[4647]: I1128 15:26:10.393712 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:10 crc kubenswrapper[4647]: I1128 15:26:10.393772 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:10 crc kubenswrapper[4647]: I1128 15:26:10.393963 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.395878 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:10 crc kubenswrapper[4647]: I1128 15:26:10.396216 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.396369 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.396788 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.397571 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:10 crc kubenswrapper[4647]: I1128 15:26:10.761943 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.762169 4647 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:26:10 crc kubenswrapper[4647]: E1128 15:26:10.762273 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs podName:a79b0b39-cffb-4ac3-a526-837c6aa70616 nodeName:}" failed. No retries permitted until 2025-11-28 15:27:14.762253905 +0000 UTC m=+164.609860326 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs") pod "network-metrics-daemon-cz6sq" (UID: "a79b0b39-cffb-4ac3-a526-837c6aa70616") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:26:12 crc kubenswrapper[4647]: I1128 15:26:12.393568 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:12 crc kubenswrapper[4647]: I1128 15:26:12.393633 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:12 crc kubenswrapper[4647]: I1128 15:26:12.393662 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:12 crc kubenswrapper[4647]: I1128 15:26:12.393568 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:12 crc kubenswrapper[4647]: E1128 15:26:12.393716 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:12 crc kubenswrapper[4647]: E1128 15:26:12.393791 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:12 crc kubenswrapper[4647]: E1128 15:26:12.393844 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:12 crc kubenswrapper[4647]: E1128 15:26:12.393915 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:14 crc kubenswrapper[4647]: I1128 15:26:14.394264 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:14 crc kubenswrapper[4647]: E1128 15:26:14.394703 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:14 crc kubenswrapper[4647]: I1128 15:26:14.394269 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:14 crc kubenswrapper[4647]: I1128 15:26:14.394267 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:14 crc kubenswrapper[4647]: E1128 15:26:14.394779 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:14 crc kubenswrapper[4647]: I1128 15:26:14.394292 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:14 crc kubenswrapper[4647]: E1128 15:26:14.394845 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:14 crc kubenswrapper[4647]: E1128 15:26:14.394905 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:16 crc kubenswrapper[4647]: I1128 15:26:16.394187 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:16 crc kubenswrapper[4647]: I1128 15:26:16.394252 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:16 crc kubenswrapper[4647]: I1128 15:26:16.394187 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:16 crc kubenswrapper[4647]: E1128 15:26:16.394406 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:16 crc kubenswrapper[4647]: I1128 15:26:16.394530 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:16 crc kubenswrapper[4647]: E1128 15:26:16.394520 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:16 crc kubenswrapper[4647]: E1128 15:26:16.394598 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:16 crc kubenswrapper[4647]: E1128 15:26:16.394697 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:18 crc kubenswrapper[4647]: I1128 15:26:18.393625 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:18 crc kubenswrapper[4647]: I1128 15:26:18.393765 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:18 crc kubenswrapper[4647]: I1128 15:26:18.393776 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:18 crc kubenswrapper[4647]: E1128 15:26:18.394189 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:18 crc kubenswrapper[4647]: I1128 15:26:18.394215 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:18 crc kubenswrapper[4647]: E1128 15:26:18.394278 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:18 crc kubenswrapper[4647]: E1128 15:26:18.394373 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:18 crc kubenswrapper[4647]: E1128 15:26:18.394591 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:18 crc kubenswrapper[4647]: I1128 15:26:18.396227 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:26:18 crc kubenswrapper[4647]: E1128 15:26:18.396685 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-c76pb_openshift-ovn-kubernetes(de25f5ba-91da-4a77-8747-ec3a56a141df)\"" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" Nov 28 15:26:20 crc kubenswrapper[4647]: I1128 15:26:20.394391 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:20 crc kubenswrapper[4647]: I1128 15:26:20.394456 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:20 crc kubenswrapper[4647]: I1128 15:26:20.395924 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:20 crc kubenswrapper[4647]: E1128 15:26:20.396107 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:20 crc kubenswrapper[4647]: I1128 15:26:20.396041 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:20 crc kubenswrapper[4647]: E1128 15:26:20.396324 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:20 crc kubenswrapper[4647]: E1128 15:26:20.396551 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:20 crc kubenswrapper[4647]: E1128 15:26:20.396678 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:22 crc kubenswrapper[4647]: I1128 15:26:22.394229 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:22 crc kubenswrapper[4647]: I1128 15:26:22.394229 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:22 crc kubenswrapper[4647]: I1128 15:26:22.394248 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:22 crc kubenswrapper[4647]: I1128 15:26:22.394332 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:22 crc kubenswrapper[4647]: E1128 15:26:22.394452 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:22 crc kubenswrapper[4647]: E1128 15:26:22.394506 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:22 crc kubenswrapper[4647]: E1128 15:26:22.394524 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:22 crc kubenswrapper[4647]: E1128 15:26:22.394567 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:24 crc kubenswrapper[4647]: I1128 15:26:24.393493 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:24 crc kubenswrapper[4647]: I1128 15:26:24.393551 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:24 crc kubenswrapper[4647]: I1128 15:26:24.393494 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:24 crc kubenswrapper[4647]: E1128 15:26:24.393660 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:24 crc kubenswrapper[4647]: E1128 15:26:24.393747 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:24 crc kubenswrapper[4647]: E1128 15:26:24.393829 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:24 crc kubenswrapper[4647]: I1128 15:26:24.393533 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:24 crc kubenswrapper[4647]: E1128 15:26:24.395553 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:26 crc kubenswrapper[4647]: I1128 15:26:26.393631 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:26 crc kubenswrapper[4647]: E1128 15:26:26.393777 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:26 crc kubenswrapper[4647]: I1128 15:26:26.394012 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:26 crc kubenswrapper[4647]: E1128 15:26:26.394064 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:26 crc kubenswrapper[4647]: I1128 15:26:26.394185 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:26 crc kubenswrapper[4647]: E1128 15:26:26.394235 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:26 crc kubenswrapper[4647]: I1128 15:26:26.394373 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:26 crc kubenswrapper[4647]: E1128 15:26:26.394433 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.043819 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/1.log" Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.044792 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/0.log" Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.044869 4647 generic.go:334] "Generic (PLEG): container finished" podID="8fe12df9-7deb-4f76-91cf-5b6b138d7675" containerID="8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda" exitCode=1 Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.044917 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerDied","Data":"8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda"} Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.044969 4647 scope.go:117] "RemoveContainer" containerID="c0ac4ae09b71cbb74eb9936cf81a892e1789fb2913986ebbff2c03778b4106e9" Nov 28 15:26:27 crc kubenswrapper[4647]: I1128 15:26:27.046230 4647 scope.go:117] "RemoveContainer" containerID="8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda" Nov 28 15:26:27 crc kubenswrapper[4647]: E1128 15:26:27.046554 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-4mdqn_openshift-multus(8fe12df9-7deb-4f76-91cf-5b6b138d7675)\"" pod="openshift-multus/multus-4mdqn" podUID="8fe12df9-7deb-4f76-91cf-5b6b138d7675" Nov 28 15:26:28 crc kubenswrapper[4647]: I1128 15:26:28.049108 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/1.log" Nov 28 15:26:28 crc kubenswrapper[4647]: I1128 15:26:28.393788 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:28 crc kubenswrapper[4647]: I1128 15:26:28.393876 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:28 crc kubenswrapper[4647]: E1128 15:26:28.393926 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:28 crc kubenswrapper[4647]: E1128 15:26:28.394080 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:28 crc kubenswrapper[4647]: I1128 15:26:28.394144 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:28 crc kubenswrapper[4647]: E1128 15:26:28.394297 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:28 crc kubenswrapper[4647]: I1128 15:26:28.394359 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:28 crc kubenswrapper[4647]: E1128 15:26:28.394464 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:30 crc kubenswrapper[4647]: I1128 15:26:30.393404 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:30 crc kubenswrapper[4647]: I1128 15:26:30.393529 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:30 crc kubenswrapper[4647]: I1128 15:26:30.393589 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:30 crc kubenswrapper[4647]: I1128 15:26:30.393493 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.394534 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.394627 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.394718 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.394822 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:30 crc kubenswrapper[4647]: I1128 15:26:30.394883 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.396581 4647 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 15:26:30 crc kubenswrapper[4647]: E1128 15:26:30.546041 4647 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.063051 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/3.log" Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.066633 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerStarted","Data":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.067091 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.101088 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podStartSLOduration=99.101060814 podStartE2EDuration="1m39.101060814s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:31.098450581 +0000 UTC m=+120.946057032" watchObservedRunningTime="2025-11-28 15:26:31.101060814 +0000 UTC m=+120.948667235" Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.401781 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cz6sq"] Nov 28 15:26:31 crc kubenswrapper[4647]: I1128 15:26:31.401996 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:31 crc kubenswrapper[4647]: E1128 15:26:31.402176 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:32 crc kubenswrapper[4647]: I1128 15:26:32.393835 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:32 crc kubenswrapper[4647]: E1128 15:26:32.394065 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:32 crc kubenswrapper[4647]: I1128 15:26:32.394441 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:32 crc kubenswrapper[4647]: E1128 15:26:32.394552 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:32 crc kubenswrapper[4647]: I1128 15:26:32.394869 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:32 crc kubenswrapper[4647]: E1128 15:26:32.395015 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:33 crc kubenswrapper[4647]: I1128 15:26:33.394277 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:33 crc kubenswrapper[4647]: E1128 15:26:33.395135 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:34 crc kubenswrapper[4647]: I1128 15:26:34.394058 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:34 crc kubenswrapper[4647]: I1128 15:26:34.394087 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:34 crc kubenswrapper[4647]: I1128 15:26:34.394176 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:34 crc kubenswrapper[4647]: E1128 15:26:34.394394 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:34 crc kubenswrapper[4647]: E1128 15:26:34.394626 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:34 crc kubenswrapper[4647]: E1128 15:26:34.394793 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:35 crc kubenswrapper[4647]: I1128 15:26:35.393575 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:35 crc kubenswrapper[4647]: E1128 15:26:35.393736 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:35 crc kubenswrapper[4647]: E1128 15:26:35.548311 4647 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:26:36 crc kubenswrapper[4647]: I1128 15:26:36.393439 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:36 crc kubenswrapper[4647]: I1128 15:26:36.393444 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:36 crc kubenswrapper[4647]: E1128 15:26:36.393645 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:36 crc kubenswrapper[4647]: I1128 15:26:36.393794 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:36 crc kubenswrapper[4647]: E1128 15:26:36.393944 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:36 crc kubenswrapper[4647]: E1128 15:26:36.394056 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:37 crc kubenswrapper[4647]: I1128 15:26:37.393307 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:37 crc kubenswrapper[4647]: E1128 15:26:37.393467 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:38 crc kubenswrapper[4647]: I1128 15:26:38.396257 4647 scope.go:117] "RemoveContainer" containerID="8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda" Nov 28 15:26:38 crc kubenswrapper[4647]: I1128 15:26:38.396574 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:38 crc kubenswrapper[4647]: E1128 15:26:38.396633 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:38 crc kubenswrapper[4647]: I1128 15:26:38.396787 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:38 crc kubenswrapper[4647]: E1128 15:26:38.396844 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:38 crc kubenswrapper[4647]: I1128 15:26:38.396938 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:38 crc kubenswrapper[4647]: E1128 15:26:38.396977 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:39 crc kubenswrapper[4647]: I1128 15:26:39.101696 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/1.log" Nov 28 15:26:39 crc kubenswrapper[4647]: I1128 15:26:39.102086 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerStarted","Data":"8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae"} Nov 28 15:26:39 crc kubenswrapper[4647]: I1128 15:26:39.393310 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:39 crc kubenswrapper[4647]: E1128 15:26:39.393780 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:40 crc kubenswrapper[4647]: I1128 15:26:40.393840 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:40 crc kubenswrapper[4647]: I1128 15:26:40.393887 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:40 crc kubenswrapper[4647]: I1128 15:26:40.393840 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:40 crc kubenswrapper[4647]: E1128 15:26:40.394026 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:40 crc kubenswrapper[4647]: E1128 15:26:40.394178 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:40 crc kubenswrapper[4647]: E1128 15:26:40.394278 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:40 crc kubenswrapper[4647]: E1128 15:26:40.554849 4647 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:26:41 crc kubenswrapper[4647]: I1128 15:26:41.393909 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:41 crc kubenswrapper[4647]: E1128 15:26:41.394826 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:42 crc kubenswrapper[4647]: I1128 15:26:42.393685 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:42 crc kubenswrapper[4647]: I1128 15:26:42.393860 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:42 crc kubenswrapper[4647]: E1128 15:26:42.394008 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:42 crc kubenswrapper[4647]: I1128 15:26:42.394034 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:42 crc kubenswrapper[4647]: E1128 15:26:42.394307 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:42 crc kubenswrapper[4647]: E1128 15:26:42.394536 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:43 crc kubenswrapper[4647]: I1128 15:26:43.393942 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:43 crc kubenswrapper[4647]: E1128 15:26:43.394106 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:44 crc kubenswrapper[4647]: I1128 15:26:44.394717 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:44 crc kubenswrapper[4647]: I1128 15:26:44.394763 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:44 crc kubenswrapper[4647]: E1128 15:26:44.394947 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:26:44 crc kubenswrapper[4647]: I1128 15:26:44.395200 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:44 crc kubenswrapper[4647]: E1128 15:26:44.395297 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:26:44 crc kubenswrapper[4647]: E1128 15:26:44.395522 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:26:45 crc kubenswrapper[4647]: I1128 15:26:45.393529 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:45 crc kubenswrapper[4647]: E1128 15:26:45.393746 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-cz6sq" podUID="a79b0b39-cffb-4ac3-a526-837c6aa70616" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.394298 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.394440 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.394457 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.399267 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.401618 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.402137 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 15:26:46 crc kubenswrapper[4647]: I1128 15:26:46.403552 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 15:26:47 crc kubenswrapper[4647]: I1128 15:26:47.393959 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:26:47 crc kubenswrapper[4647]: I1128 15:26:47.397997 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 15:26:47 crc kubenswrapper[4647]: I1128 15:26:47.398591 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.150242 4647 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.228184 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6t8hg"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.228976 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.232843 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.232899 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.236239 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.237099 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.239041 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.242987 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.249381 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.250259 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.251089 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.251802 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.257670 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.258706 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.259756 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.261142 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-g75wv"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.279788 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.280882 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.281447 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282071 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282311 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282428 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282527 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282598 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282655 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282768 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.282926 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.283552 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.284290 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.284498 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.284783 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.285551 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.286733 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.287138 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.287471 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.295371 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.296072 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.297028 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.297439 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.297952 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.297995 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.298314 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.298371 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.299585 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.300043 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.300646 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5rsn5"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.301084 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.302910 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.303380 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.303727 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.303962 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.314591 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.315513 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.315683 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.315832 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.315983 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316120 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316171 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316247 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316372 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316392 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316508 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316576 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316607 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316726 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316833 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.316970 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.317075 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.317238 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9"] Nov 28 15:26:49 
crc kubenswrapper[4647]: I1128 15:26:49.317904 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.318174 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.318550 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.319561 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.322568 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.328130 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.336276 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.336625 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.337252 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.338990 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.340245 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.340676 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.340962 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.341168 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.341343 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.342463 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.342609 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.342774 4647 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.342893 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.344747 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.345430 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.345948 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.347292 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.347509 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350370 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc3703bb-9abe-4db2-8988-0698f9c97957-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350447 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/232ba417-fd39-4e22-b02f-5a911f9f8b33-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350497 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3703bb-9abe-4db2-8988-0698f9c97957-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350532 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/232ba417-fd39-4e22-b02f-5a911f9f8b33-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350575 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-images\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: 
\"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350605 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc3703bb-9abe-4db2-8988-0698f9c97957-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350630 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4408688c-7115-4338-9b06-e30b0ed30399-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350658 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvqrw\" (UniqueName: \"kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350680 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2lk2\" (UniqueName: \"kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350718 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350772 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350795 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350818 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca\") pod 
\"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350838 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/232ba417-fd39-4e22-b02f-5a911f9f8b33-config\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350861 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-config\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350880 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sphrh\" (UniqueName: \"kubernetes.io/projected/4408688c-7115-4338-9b06-e30b0ed30399-kube-api-access-sphrh\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350905 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350926 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.350976 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.353854 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8cp4n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.364262 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n7kp2"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.365372 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ng9vc"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.366177 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.357898 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.367254 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.367733 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358007 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372586 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7z7l2"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358094 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358110 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358187 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358217 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358480 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.358663 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.359202 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.360291 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.363269 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.363787 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.363894 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.364000 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.364225 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 15:26:49 crc 
kubenswrapper[4647]: I1128 15:26:49.365272 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.365349 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.365394 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.371876 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.371955 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372027 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372332 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372375 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372715 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372769 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372867 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.372909 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.373008 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.373015 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.373098 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.373163 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.389372 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.391634 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.391761 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.392149 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.392309 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.392375 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.392532 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.392819 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.393348 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.393609 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.393864 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.393878 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.394323 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.394491 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.394765 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395133 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395132 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395402 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395436 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395560 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.395923 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.396550 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.402376 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.402723 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.403098 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.403242 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.403621 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.403985 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.404293 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.414303 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.415303 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.415516 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ql2nw"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.416121 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.416207 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.417316 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.417885 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.418186 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.418504 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.418635 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vklkx"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.419462 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.420122 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.420401 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k2k77"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.420552 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.421105 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.424572 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-l52jr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.425490 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.425683 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-t88ct"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.426371 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.427646 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.428163 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.428873 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.429520 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.429959 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6t8hg"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.432847 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.443982 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.444847 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.446368 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453072 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453213 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktn2t\" (UniqueName: \"kubernetes.io/projected/c0e19f9c-f340-40c4-82c8-cebc432e7e15-kube-api-access-ktn2t\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453321 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-audit-policies\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453501 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glbz5\" (UniqueName: \"kubernetes.io/projected/5e711623-78c2-4e1f-a65f-1c4d871e3d21-kube-api-access-glbz5\") pod \"downloads-7954f5f757-7z7l2\" (UID: \"5e711623-78c2-4e1f-a65f-1c4d871e3d21\") " pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453609 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c044b720-8bbd-4d48-a61b-d37188cfa478-tmpfs\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453704 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e82223-b810-4483-8505-82973a515276-config\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453804 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-images\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.453930 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc3703bb-9abe-4db2-8988-0698f9c97957-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454022 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/229d7233-477b-42a8-81e3-5437db9d608e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454126 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-srv-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454236 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4408688c-7115-4338-9b06-e30b0ed30399-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454331 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flkv5\" (UniqueName: \"kubernetes.io/projected/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-kube-api-access-flkv5\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454439 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93397a-9a96-4902-990a-e7524fec3dd2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454538 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-apiservice-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454640 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvqrw\" (UniqueName: \"kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454743 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2lk2\" (UniqueName: \"kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454844 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx4rb\" (UniqueName: \"kubernetes.io/projected/001333da-a6ba-41b2-a280-8b4825dd8a41-kube-api-access-rx4rb\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.454935 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlq8p\" (UniqueName: \"kubernetes.io/projected/41268623-f4ac-491d-8948-c002f6eac77f-kube-api-access-mlq8p\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455021 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g56gz\" (UniqueName: \"kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455126 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455247 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f485t\" (UniqueName: \"kubernetes.io/projected/24db15e7-f30e-47c0-8227-a38041aea560-kube-api-access-f485t\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455379 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455539 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455681 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455825 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/229d7233-477b-42a8-81e3-5437db9d608e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455958 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.456098 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.456246 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d750be5a-3ad8-4a6b-b959-479aacbb4f95-serving-cert\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.456398 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d750be5a-3ad8-4a6b-b959-479aacbb4f95-config\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.456584 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-webhook-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457548 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/232ba417-fd39-4e22-b02f-5a911f9f8b33-config\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457589 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457613 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgzmg\" (UniqueName: \"kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457639 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457658 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457679 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-config\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457697 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9r9q\" (UniqueName: \"kubernetes.io/projected/fd93397a-9a96-4902-990a-e7524fec3dd2-kube-api-access-k9r9q\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457718 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sphrh\" 
(UniqueName: \"kubernetes.io/projected/4408688c-7115-4338-9b06-e30b0ed30399-kube-api-access-sphrh\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457734 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gn7f\" (UniqueName: \"kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457756 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457773 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72d6d\" (UniqueName: \"kubernetes.io/projected/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-kube-api-access-72d6d\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457791 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/001333da-a6ba-41b2-a280-8b4825dd8a41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457808 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a9d10933-4352-41fb-94af-6239e1c90b98-machine-approver-tls\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457840 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-images\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457860 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzz5b\" (UniqueName: \"kubernetes.io/projected/d750be5a-3ad8-4a6b-b959-479aacbb4f95-kube-api-access-nzz5b\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457876 4647 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7e82223-b810-4483-8505-82973a515276-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457897 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457913 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c8ca49c1-6905-4211-a0dd-642ffd28f28c-proxy-tls\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457935 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ffc95f7-12ba-4daf-b0ce-1236138a4844-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457953 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7r92\" (UniqueName: \"kubernetes.io/projected/fc997855-51c2-423d-876b-055ecf2df450-kube-api-access-c7r92\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457968 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cpdm\" (UniqueName: \"kubernetes.io/projected/d20da780-3761-4116-9d42-aa980d8ebbb8-kube-api-access-6cpdm\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457985 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-etcd-client\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458003 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fc997855-51c2-423d-876b-055ecf2df450-audit-dir\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458024 4647 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458043 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458060 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/001333da-a6ba-41b2-a280-8b4825dd8a41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458078 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458121 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458138 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ffc95f7-12ba-4daf-b0ce-1236138a4844-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458146 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.455134 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-images\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458193 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-proxy-tls\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458235 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-config\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458260 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458276 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458293 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/41268623-f4ac-491d-8948-c002f6eac77f-signing-key\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458311 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458330 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/24db15e7-f30e-47c0-8227-a38041aea560-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458350 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458368 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzsks\" (UniqueName: \"kubernetes.io/projected/229d7233-477b-42a8-81e3-5437db9d608e-kube-api-access-hzsks\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458386 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-config\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458403 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-srv-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458438 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458456 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458471 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc9hd\" (UniqueName: \"kubernetes.io/projected/a9d10933-4352-41fb-94af-6239e1c90b98-kube-api-access-cc9hd\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458488 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-serving-cert\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458504 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2q7c\" (UniqueName: \"kubernetes.io/projected/b9890322-9811-4113-bc61-3306b91dfd5c-kube-api-access-s2q7c\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458532 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc3703bb-9abe-4db2-8988-0698f9c97957-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458549 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btsht\" (UniqueName: \"kubernetes.io/projected/c7e82223-b810-4483-8505-82973a515276-kube-api-access-btsht\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458566 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458589 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4frvk\" (UniqueName: \"kubernetes.io/projected/c044b720-8bbd-4d48-a61b-d37188cfa478-kube-api-access-4frvk\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458608 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prw4q\" (UniqueName: \"kubernetes.io/projected/c8ca49c1-6905-4211-a0dd-642ffd28f28c-kube-api-access-prw4q\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458623 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-trusted-ca\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458640 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/232ba417-fd39-4e22-b02f-5a911f9f8b33-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458657 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458675 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458696 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458717 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-auth-proxy-config\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458727 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/232ba417-fd39-4e22-b02f-5a911f9f8b33-config\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.458738 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457494 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.457481 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.479478 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4408688c-7115-4338-9b06-e30b0ed30399-config\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.488180 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc3703bb-9abe-4db2-8988-0698f9c97957-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.488766 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4408688c-7115-4338-9b06-e30b0ed30399-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.488922 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3703bb-9abe-4db2-8988-0698f9c97957-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.488955 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/232ba417-fd39-4e22-b02f-5a911f9f8b33-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.488986 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489044 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489067 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/41268623-f4ac-491d-8948-c002f6eac77f-signing-cabundle\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489095 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489124 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffc95f7-12ba-4daf-b0ce-1236138a4844-config\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489151 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489195 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-encryption-config\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.489221 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0e19f9c-f340-40c4-82c8-cebc432e7e15-serving-cert\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.490292 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.490342 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.490376 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-g75wv"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.490389 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.491537 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.492719 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 
15:26:49.494051 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.497114 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.501653 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.502427 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.502924 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/232ba417-fd39-4e22-b02f-5a911f9f8b33-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.507679 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ng9vc"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.508246 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n7kp2"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.509801 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc3703bb-9abe-4db2-8988-0698f9c97957-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.511563 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.515289 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.518087 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.526008 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.531817 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.534875 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console-operator"/"serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.546858 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.546905 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.548124 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.553862 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.556698 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8cp4n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.558346 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7z7l2"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.559502 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.562637 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.562698 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.563569 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-glkws"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.564826 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-glkws" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.566543 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-tg7kf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.566988 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tg7kf" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.568807 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.569381 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5rsn5"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.570881 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.574263 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.574337 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k2k77"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.574494 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.577095 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.577122 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ql2nw"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.580531 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-lkh97"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.581871 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.582017 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lkh97" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.583565 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-xqbx6"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.584330 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xqbx6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.584680 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-glkws"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.587912 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vklkx"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.587949 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-l52jr"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.588546 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.589348 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lkh97"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590278 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/229d7233-477b-42a8-81e3-5437db9d608e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590309 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590340 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-webhook-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590374 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d750be5a-3ad8-4a6b-b959-479aacbb4f95-serving-cert\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590393 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d750be5a-3ad8-4a6b-b959-479aacbb4f95-config\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590437 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc 
kubenswrapper[4647]: I1128 15:26:49.590460 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgzmg\" (UniqueName: \"kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590478 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590498 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gn7f\" (UniqueName: \"kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590533 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9r9q\" (UniqueName: \"kubernetes.io/projected/fd93397a-9a96-4902-990a-e7524fec3dd2-kube-api-access-k9r9q\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590557 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzz5b\" (UniqueName: \"kubernetes.io/projected/d750be5a-3ad8-4a6b-b959-479aacbb4f95-kube-api-access-nzz5b\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590573 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590590 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72d6d\" (UniqueName: \"kubernetes.io/projected/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-kube-api-access-72d6d\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590608 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/001333da-a6ba-41b2-a280-8b4825dd8a41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590627 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a9d10933-4352-41fb-94af-6239e1c90b98-machine-approver-tls\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590645 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-images\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590661 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7e82223-b810-4483-8505-82973a515276-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.590676 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c8ca49c1-6905-4211-a0dd-642ffd28f28c-proxy-tls\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591665 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cpdm\" (UniqueName: \"kubernetes.io/projected/d20da780-3761-4116-9d42-aa980d8ebbb8-kube-api-access-6cpdm\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591724 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ffc95f7-12ba-4daf-b0ce-1236138a4844-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591744 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7r92\" (UniqueName: \"kubernetes.io/projected/fc997855-51c2-423d-876b-055ecf2df450-kube-api-access-c7r92\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591760 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-etcd-client\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591786 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fc997855-51c2-423d-876b-055ecf2df450-audit-dir\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591811 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.591962 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592023 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/001333da-a6ba-41b2-a280-8b4825dd8a41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592071 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592095 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ffc95f7-12ba-4daf-b0ce-1236138a4844-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592133 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-proxy-tls\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592155 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-config\") pod 
\"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592180 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592210 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592256 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/41268623-f4ac-491d-8948-c002f6eac77f-signing-key\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592280 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/24db15e7-f30e-47c0-8227-a38041aea560-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592302 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzsks\" (UniqueName: \"kubernetes.io/projected/229d7233-477b-42a8-81e3-5437db9d608e-kube-api-access-hzsks\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592322 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-config\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592339 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-srv-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592357 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592376 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592392 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc9hd\" (UniqueName: \"kubernetes.io/projected/a9d10933-4352-41fb-94af-6239e1c90b98-kube-api-access-cc9hd\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592407 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-serving-cert\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592442 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2q7c\" (UniqueName: \"kubernetes.io/projected/b9890322-9811-4113-bc61-3306b91dfd5c-kube-api-access-s2q7c\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592541 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btsht\" (UniqueName: \"kubernetes.io/projected/c7e82223-b810-4483-8505-82973a515276-kube-api-access-btsht\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592560 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592578 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4frvk\" (UniqueName: \"kubernetes.io/projected/c044b720-8bbd-4d48-a61b-d37188cfa478-kube-api-access-4frvk\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592596 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prw4q\" (UniqueName: \"kubernetes.io/projected/c8ca49c1-6905-4211-a0dd-642ffd28f28c-kube-api-access-prw4q\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592613 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-trusted-ca\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592632 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592648 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592667 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592685 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-auth-proxy-config\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592704 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592742 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592759 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592787 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/41268623-f4ac-491d-8948-c002f6eac77f-signing-cabundle\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592806 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592825 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffc95f7-12ba-4daf-b0ce-1236138a4844-config\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592840 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592861 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-encryption-config\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592877 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0e19f9c-f340-40c4-82c8-cebc432e7e15-serving-cert\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592895 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktn2t\" (UniqueName: \"kubernetes.io/projected/c0e19f9c-f340-40c4-82c8-cebc432e7e15-kube-api-access-ktn2t\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592915 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592935 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-audit-policies\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592957 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glbz5\" (UniqueName: \"kubernetes.io/projected/5e711623-78c2-4e1f-a65f-1c4d871e3d21-kube-api-access-glbz5\") pod \"downloads-7954f5f757-7z7l2\" (UID: \"5e711623-78c2-4e1f-a65f-1c4d871e3d21\") " pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592979 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c044b720-8bbd-4d48-a61b-d37188cfa478-tmpfs\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.592998 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e82223-b810-4483-8505-82973a515276-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593018 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/229d7233-477b-42a8-81e3-5437db9d608e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593035 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-srv-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593055 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flkv5\" (UniqueName: \"kubernetes.io/projected/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-kube-api-access-flkv5\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593074 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/fd93397a-9a96-4902-990a-e7524fec3dd2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593093 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-apiservice-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593125 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx4rb\" (UniqueName: \"kubernetes.io/projected/001333da-a6ba-41b2-a280-8b4825dd8a41-kube-api-access-rx4rb\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593144 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlq8p\" (UniqueName: \"kubernetes.io/projected/41268623-f4ac-491d-8948-c002f6eac77f-kube-api-access-mlq8p\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593160 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g56gz\" (UniqueName: \"kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593186 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.593203 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f485t\" (UniqueName: \"kubernetes.io/projected/24db15e7-f30e-47c0-8227-a38041aea560-kube-api-access-f485t\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.594184 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.594360 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies\") pod 
\"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.594722 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-trusted-ca\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.594989 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.595236 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.598480 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0e19f9c-f340-40c4-82c8-cebc432e7e15-config\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.599228 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.599264 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.599274 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tg7kf"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.604500 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fc997855-51c2-423d-876b-055ecf2df450-audit-dir\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.605160 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c8ca49c1-6905-4211-a0dd-642ffd28f28c-images\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.606146 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.607103 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.607757 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/001333da-a6ba-41b2-a280-8b4825dd8a41-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.608656 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"] Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.610460 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.611831 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-auth-proxy-config\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.613572 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7e82223-b810-4483-8505-82973a515276-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.613884 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c044b720-8bbd-4d48-a61b-d37188cfa478-tmpfs\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.614302 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.614383 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e82223-b810-4483-8505-82973a515276-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.615189 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/229d7233-477b-42a8-81e3-5437db9d608e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.620456 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-srv-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.621052 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.621084 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.621606 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/41268623-f4ac-491d-8948-c002f6eac77f-signing-key\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.621610 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.621994 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.622996 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.625005 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.625712 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-audit-policies\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.625808 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.626916 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/fc997855-51c2-423d-876b-055ecf2df450-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.627641 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0e19f9c-f340-40c4-82c8-cebc432e7e15-serving-cert\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.627893 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.628303 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.628549 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/41268623-f4ac-491d-8948-c002f6eac77f-signing-cabundle\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.629105 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d10933-4352-41fb-94af-6239e1c90b98-config\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.629269 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ffc95f7-12ba-4daf-b0ce-1236138a4844-config\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.629685 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/24db15e7-f30e-47c0-8227-a38041aea560-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.630069 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.630212 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/229d7233-477b-42a8-81e3-5437db9d608e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.630331 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-encryption-config\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.630667 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.630979 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c8ca49c1-6905-4211-a0dd-642ffd28f28c-proxy-tls\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.631265 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-serving-cert\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.631390 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.631718 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a9d10933-4352-41fb-94af-6239e1c90b98-machine-approver-tls\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.632200 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ffc95f7-12ba-4daf-b0ce-1236138a4844-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.632237 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fc997855-51c2-423d-876b-055ecf2df450-etcd-client\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.633231 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d750be5a-3ad8-4a6b-b959-479aacbb4f95-config\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.634987 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.635727 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d750be5a-3ad8-4a6b-b959-479aacbb4f95-serving-cert\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.638358 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.638613 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93397a-9a96-4902-990a-e7524fec3dd2-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.638865 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-proxy-tls\") pod 
\"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.639074 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-webhook-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.639153 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c044b720-8bbd-4d48-a61b-d37188cfa478-apiservice-cert\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.640681 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/001333da-a6ba-41b2-a280-8b4825dd8a41-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.646166 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.646319 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d20da780-3761-4116-9d42-aa980d8ebbb8-profile-collector-cert\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.646644 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b9890322-9811-4113-bc61-3306b91dfd5c-srv-cert\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.671669 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.674205 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.675735 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.681007 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template\") 
pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.692096 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.711614 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.732215 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.756142 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.772169 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.791816 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.812217 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.843567 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.855865 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.871994 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.891931 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.912033 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.932575 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.951737 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.971668 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 15:26:49 crc kubenswrapper[4647]: I1128 15:26:49.991735 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.012429 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.031179 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.051330 4647 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.078310 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.091510 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.111253 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.131725 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.151371 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.172178 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.191971 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.211151 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.231124 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.252723 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.271776 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.291676 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.312123 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.331523 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.351822 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.372155 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.392834 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.413772 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.430229 4647 request.go:700] Waited for 1.007744395s due 
to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.432553 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.452968 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.471676 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.491690 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.524189 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.544238 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.551876 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.571657 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.592058 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.610812 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.632018 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.652197 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.672364 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.692364 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.713346 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.733953 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.753001 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.756386 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.773053 4647 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.792175 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.811693 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.832537 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.851988 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.872160 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.905097 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.911900 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.931837 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.952928 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.972356 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 15:26:50 crc kubenswrapper[4647]: I1128 15:26:50.993224 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.040067 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvqrw\" (UniqueName: \"kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw\") pod \"controller-manager-879f6c89f-vcv8n\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.053539 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2lk2\" (UniqueName: \"kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2\") pod \"route-controller-manager-6576b87f9c-kd9bf\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.073639 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sphrh\" (UniqueName: \"kubernetes.io/projected/4408688c-7115-4338-9b06-e30b0ed30399-kube-api-access-sphrh\") pod \"machine-api-operator-5694c8668f-6t8hg\" (UID: \"4408688c-7115-4338-9b06-e30b0ed30399\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.084995 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.116600 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.117558 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3703bb-9abe-4db2-8988-0698f9c97957-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wwsbr\" (UID: \"dc3703bb-9abe-4db2-8988-0698f9c97957\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.133998 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/232ba417-fd39-4e22-b02f-5a911f9f8b33-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qtsj\" (UID: \"232ba417-fd39-4e22-b02f-5a911f9f8b33\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.152738 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.172487 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.179682 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.191376 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.212355 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.236568 4647 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.254169 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.272469 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.291792 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.312065 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.331909 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.352604 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.372129 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.392884 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.401540 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.427863 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.430387 4647 request.go:700] Waited for 1.834862001s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver-operator/serviceaccounts/openshift-apiserver-operator/token Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.438833 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f485t\" (UniqueName: \"kubernetes.io/projected/24db15e7-f30e-47c0-8227-a38041aea560-kube-api-access-f485t\") pod \"multus-admission-controller-857f4d67dd-g75wv\" (UID: \"24db15e7-f30e-47c0-8227-a38041aea560\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.469497 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzsks\" (UniqueName: \"kubernetes.io/projected/229d7233-477b-42a8-81e3-5437db9d608e-kube-api-access-hzsks\") pod \"openshift-apiserver-operator-796bbdcf4f-qqcpz\" (UID: \"229d7233-477b-42a8-81e3-5437db9d608e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.480715 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc9hd\" (UniqueName: \"kubernetes.io/projected/a9d10933-4352-41fb-94af-6239e1c90b98-kube-api-access-cc9hd\") pod \"machine-approver-56656f9798-fpqv9\" (UID: \"a9d10933-4352-41fb-94af-6239e1c90b98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.500764 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2q7c\" (UniqueName: \"kubernetes.io/projected/b9890322-9811-4113-bc61-3306b91dfd5c-kube-api-access-s2q7c\") pod \"olm-operator-6b444d44fb-8tcng\" (UID: \"b9890322-9811-4113-bc61-3306b91dfd5c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.510585 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btsht\" (UniqueName: \"kubernetes.io/projected/c7e82223-b810-4483-8505-82973a515276-kube-api-access-btsht\") pod \"kube-storage-version-migrator-operator-b67b599dd-76c8x\" (UID: \"c7e82223-b810-4483-8505-82973a515276\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.514846 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.527803 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glbz5\" (UniqueName: \"kubernetes.io/projected/5e711623-78c2-4e1f-a65f-1c4d871e3d21-kube-api-access-glbz5\") pod \"downloads-7954f5f757-7z7l2\" (UID: \"5e711623-78c2-4e1f-a65f-1c4d871e3d21\") " pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.534025 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.553739 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cpdm\" (UniqueName: \"kubernetes.io/projected/d20da780-3761-4116-9d42-aa980d8ebbb8-kube-api-access-6cpdm\") pod \"catalog-operator-68c6474976-wx7sm\" (UID: \"d20da780-3761-4116-9d42-aa980d8ebbb8\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.566925 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ffc95f7-12ba-4daf-b0ce-1236138a4844-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f9nm6\" (UID: \"3ffc95f7-12ba-4daf-b0ce-1236138a4844\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.585698 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7r92\" (UniqueName: \"kubernetes.io/projected/fc997855-51c2-423d-876b-055ecf2df450-kube-api-access-c7r92\") pod \"apiserver-7bbb656c7d-9fl4s\" (UID: \"fc997855-51c2-423d-876b-055ecf2df450\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.609307 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4frvk\" (UniqueName: \"kubernetes.io/projected/c044b720-8bbd-4d48-a61b-d37188cfa478-kube-api-access-4frvk\") pod \"packageserver-d55dfcdfc-2s6l7\" (UID: \"c044b720-8bbd-4d48-a61b-d37188cfa478\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.629161 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prw4q\" (UniqueName: \"kubernetes.io/projected/c8ca49c1-6905-4211-a0dd-642ffd28f28c-kube-api-access-prw4q\") pod \"machine-config-operator-74547568cd-pjcrl\" (UID: \"c8ca49c1-6905-4211-a0dd-642ffd28f28c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.644815 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.648956 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzz5b\" (UniqueName: \"kubernetes.io/projected/d750be5a-3ad8-4a6b-b959-479aacbb4f95-kube-api-access-nzz5b\") pod \"service-ca-operator-777779d784-rt5dp\" (UID: \"d750be5a-3ad8-4a6b-b959-479aacbb4f95\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.654667 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.669613 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.671893 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktn2t\" (UniqueName: \"kubernetes.io/projected/c0e19f9c-f340-40c4-82c8-cebc432e7e15-kube-api-access-ktn2t\") pod \"console-operator-58897d9998-8cp4n\" (UID: \"c0e19f9c-f340-40c4-82c8-cebc432e7e15\") " pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.677876 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.689735 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgzmg\" (UniqueName: \"kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg\") pod \"oauth-openshift-558db77b4-n7kp2\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") " pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.704609 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.707699 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gn7f\" (UniqueName: \"kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f\") pod \"marketplace-operator-79b997595-5rsn5\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.734875 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9r9q\" (UniqueName: \"kubernetes.io/projected/fd93397a-9a96-4902-990a-e7524fec3dd2-kube-api-access-k9r9q\") pod \"package-server-manager-789f6589d5-rmbtx\" (UID: \"fd93397a-9a96-4902-990a-e7524fec3dd2\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.747817 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.760636 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlq8p\" (UniqueName: \"kubernetes.io/projected/41268623-f4ac-491d-8948-c002f6eac77f-kube-api-access-mlq8p\") pod \"service-ca-9c57cc56f-ng9vc\" (UID: \"41268623-f4ac-491d-8948-c002f6eac77f\") " pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.773137 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.784206 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx4rb\" (UniqueName: \"kubernetes.io/projected/001333da-a6ba-41b2-a280-8b4825dd8a41-kube-api-access-rx4rb\") pod \"openshift-controller-manager-operator-756b6f6bc6-l8fwr\" (UID: \"001333da-a6ba-41b2-a280-8b4825dd8a41\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.799766 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g56gz\" (UniqueName: \"kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz\") pod \"collect-profiles-29405715-xc84q\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.801852 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.802715 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.814905 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.816596 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72d6d\" (UniqueName: \"kubernetes.io/projected/8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c-kube-api-access-72d6d\") pod \"machine-config-controller-84d6567774-sfpgz\" (UID: \"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.824407 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.831295 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flkv5\" (UniqueName: \"kubernetes.io/projected/4a2a1306-2eff-4fc2-ac8c-8bb461353abd-kube-api-access-flkv5\") pod \"control-plane-machine-set-operator-78cbb6b69f-5gqvz\" (UID: \"4a2a1306-2eff-4fc2-ac8c-8bb461353abd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.841026 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" Nov 28 15:26:51 crc kubenswrapper[4647]: I1128 15:26:51.847504 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.528785 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.530174 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.530931 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.531796 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.532447 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.536496 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.539960 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.540017 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.540086 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.540145 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.540845 4647 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.040827261 +0000 UTC m=+142.888433692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.644141 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.644394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.645228 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.145186533 +0000 UTC m=+142.992792954 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646648 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-node-pullsecrets\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646685 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-service-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646701 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2609685-750c-4716-8f93-a37049032177-service-ca-bundle\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646717 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-etcd-serving-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646773 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646805 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646869 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bf19ca3-646a-4484-b312-0ad04033cf51-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646888 4647 
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646888 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-serving-cert\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.646985 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-serving-cert\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d3360086-d51c-4932-8cfa-b166acba27dc-metrics-tls\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647034 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6hqg\" (UniqueName: \"kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647051 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcdkb\" (UniqueName: \"kubernetes.io/projected/d2229fa4-dc3e-4783-8163-c535db52c796-kube-api-access-fcdkb\") pod \"migrator-59844c95c7-98qfq\" (UID: \"d2229fa4-dc3e-4783-8163-c535db52c796\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647073 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-serving-cert\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647089 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647146 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d3360086-d51c-4932-8cfa-b166acba27dc-trusted-ca\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647202 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l299t\" (UniqueName: \"kubernetes.io/projected/24691f90-319b-4ccf-95ff-8c085f85bef9-kube-api-access-l299t\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647227 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-default-certificate\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647247 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647286 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647301 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-image-import-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647339 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647357 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpskb\" (UniqueName: \"kubernetes.io/projected/522dc69e-0abb-4f90-b033-979c2bd1ef9d-kube-api-access-cpskb\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647376 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647391 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/522dc69e-0abb-4f90-b033-979c2bd1ef9d-serving-cert\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647455 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8bf19ca3-646a-4484-b312-0ad04033cf51-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647544 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647587 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnqt9\" (UniqueName: \"kubernetes.io/projected/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-kube-api-access-fnqt9\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647603 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-metrics-certs\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647620 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s7pp\" (UniqueName: \"kubernetes.io/projected/a2609685-750c-4716-8f93-a37049032177-kube-api-access-6s7pp\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647636 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-audit-dir\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647672 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647700 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-service-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647716 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647732 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-stats-auth\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647757 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s99k\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-kube-api-access-2s99k\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647772 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-encryption-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647798 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647871 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-config\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647901 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.647967 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-audit\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648054 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648099 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648165 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs677\" (UniqueName: \"kubernetes.io/projected/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-kube-api-access-xs677\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648206 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb6xf\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648222 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.648237 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-client\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.650287 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.653621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.653960 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.653987 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-etcd-client\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.656750 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.656822 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-config\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.659242 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.15920646 +0000 UTC m=+143.006812881 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.662391 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.662790 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvbqb\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-kube-api-access-rvbqb\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.662862 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbkgw\" (UniqueName: \"kubernetes.io/projected/e47f3602-639f-459a-a9e1-55695c70bd96-kube-api-access-pbkgw\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.663048 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e47f3602-639f-459a-a9e1-55695c70bd96-metrics-tls\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.663069 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxvrv\" (UniqueName: \"kubernetes.io/projected/d4458ea3-1882-4206-b1b2-885143795954-kube-api-access-kxvrv\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.663121 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-trusted-ca-bundle\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.671566 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.707305 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765369 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765667 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpskb\" (UniqueName: \"kubernetes.io/projected/522dc69e-0abb-4f90-b033-979c2bd1ef9d-kube-api-access-cpskb\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765709 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765725 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/522dc69e-0abb-4f90-b033-979c2bd1ef9d-serving-cert\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765752 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-node-bootstrap-token\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765781 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-certs\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765801 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a2e3836a-3363-4908-8217-57a6d1737d91-metrics-tls\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765821 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8bf19ca3-646a-4484-b312-0ad04033cf51-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765840 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765855 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-registration-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765871 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2e3836a-3363-4908-8217-57a6d1737d91-config-volume\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765900 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnqt9\" (UniqueName: \"kubernetes.io/projected/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-kube-api-access-fnqt9\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765943 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-metrics-certs\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765958 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s7pp\" (UniqueName: \"kubernetes.io/projected/a2609685-750c-4716-8f93-a37049032177-kube-api-access-6s7pp\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765978 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-audit-dir\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.765996 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c46l\" (UniqueName: \"kubernetes.io/projected/00e78d31-cbbb-4ee5-b687-14e01b2761df-kube-api-access-5c46l\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766014 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766030 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-mountpoint-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766051 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-service-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766067 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766084 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-stats-auth\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766101 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s99k\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-kube-api-access-2s99k\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766119 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-encryption-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766136 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766171 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e08dbbbf-0bcb-42b8-9490-82434c360f01-cert\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766189 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-config\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766205 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766223 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-audit\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766243 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt4hb\" (UniqueName: \"kubernetes.io/projected/e08dbbbf-0bcb-42b8-9490-82434c360f01-kube-api-access-nt4hb\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766264 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766289 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs677\" (UniqueName: \"kubernetes.io/projected/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-kube-api-access-xs677\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766317 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb6xf\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766337 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766354 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-client\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766378 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.766395 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767181 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-etcd-client\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767222 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-config\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767240 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767264 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvbqb\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-kube-api-access-rvbqb\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767285 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbkgw\" (UniqueName: \"kubernetes.io/projected/e47f3602-639f-459a-a9e1-55695c70bd96-kube-api-access-pbkgw\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767304 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e47f3602-639f-459a-a9e1-55695c70bd96-metrics-tls\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
"operationExecutor.MountVolume started for volume \"kube-api-access-kxvrv\" (UniqueName: \"kubernetes.io/projected/d4458ea3-1882-4206-b1b2-885143795954-kube-api-access-kxvrv\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767351 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-trusted-ca-bundle\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767373 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvk5h\" (UniqueName: \"kubernetes.io/projected/8b81aefd-2f13-4986-a339-2accc815813a-kube-api-access-pvk5h\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767427 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-node-pullsecrets\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767446 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-service-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767464 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2609685-750c-4716-8f93-a37049032177-service-ca-bundle\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767483 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-etcd-serving-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767503 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767524 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767546 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-socket-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767605 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bf19ca3-646a-4484-b312-0ad04033cf51-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767622 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-serving-cert\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767641 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22dnb\" (UniqueName: \"kubernetes.io/projected/a2e3836a-3363-4908-8217-57a6d1737d91-kube-api-access-22dnb\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767672 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-serving-cert\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767706 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d3360086-d51c-4932-8cfa-b166acba27dc-metrics-tls\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767723 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-plugins-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767747 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6hqg\" (UniqueName: 
\"kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767766 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcdkb\" (UniqueName: \"kubernetes.io/projected/d2229fa4-dc3e-4783-8163-c535db52c796-kube-api-access-fcdkb\") pod \"migrator-59844c95c7-98qfq\" (UID: \"d2229fa4-dc3e-4783-8163-c535db52c796\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767789 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-serving-cert\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767807 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-csi-data-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767825 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767853 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d3360086-d51c-4932-8cfa-b166acba27dc-trusted-ca\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767876 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l299t\" (UniqueName: \"kubernetes.io/projected/24691f90-319b-4ccf-95ff-8c085f85bef9-kube-api-access-l299t\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.767893 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-default-certificate\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.768000 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 
15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.768030 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-image-import-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.769691 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.269659534 +0000 UTC m=+143.117265955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.769708 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-image-import-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.770254 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.771136 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bf19ca3-646a-4484-b312-0ad04033cf51-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.777107 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-serving-cert\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.777652 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.780188 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.780326 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.780589 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d3360086-d51c-4932-8cfa-b166acba27dc-trusted-ca\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.786772 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-node-pullsecrets\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.787241 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d4458ea3-1882-4206-b1b2-885143795954-audit-dir\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.787334 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-service-ca-bundle\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.787609 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.787986 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-trusted-ca-bundle\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.788199 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a2609685-750c-4716-8f93-a37049032177-service-ca-bundle\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.788814 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-serving-cert\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.789124 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-serving-cert\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.789713 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-config\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.790512 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.793859 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-service-ca\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.793985 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-audit\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.799719 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-etcd-client\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.800803 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.801931 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.802783 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d4458ea3-1882-4206-b1b2-885143795954-etcd-serving-ca\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.804617 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.805000 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d3360086-d51c-4932-8cfa-b166acba27dc-metrics-tls\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.805142 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.805437 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/522dc69e-0abb-4f90-b033-979c2bd1ef9d-serving-cert\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.805890 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.805908 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-default-certificate\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.807151 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.807354 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e47f3602-639f-459a-a9e1-55695c70bd96-metrics-tls\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.807474 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/24691f90-319b-4ccf-95ff-8c085f85bef9-etcd-client\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.808231 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.808657 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-stats-auth\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.809689 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.809959 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a2609685-750c-4716-8f93-a37049032177-metrics-certs\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.811080 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.811763 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6hqg\" (UniqueName: \"kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg\") pod \"console-f9d7485db-z99bl\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.812006 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d4458ea3-1882-4206-b1b2-885143795954-encryption-config\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.814062 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/522dc69e-0abb-4f90-b033-979c2bd1ef9d-config\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.835156 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8bf19ca3-646a-4484-b312-0ad04033cf51-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.836439 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnqt9\" (UniqueName: \"kubernetes.io/projected/00b15066-0185-4ee0-9e0d-ef9db2d4ab19-kube-api-access-fnqt9\") pod \"cluster-samples-operator-665b6dd947-4nxtl\" (UID: \"00b15066-0185-4ee0-9e0d-ef9db2d4ab19\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.868872 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.868934 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvk5h\" (UniqueName: \"kubernetes.io/projected/8b81aefd-2f13-4986-a339-2accc815813a-kube-api-access-pvk5h\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.868956 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-socket-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.868974 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22dnb\" (UniqueName: \"kubernetes.io/projected/a2e3836a-3363-4908-8217-57a6d1737d91-kube-api-access-22dnb\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.868997 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-plugins-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869018 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-csi-data-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869057 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-node-bootstrap-token\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869074 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-certs\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869091 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a2e3836a-3363-4908-8217-57a6d1737d91-metrics-tls\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869112 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-registration-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869129 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2e3836a-3363-4908-8217-57a6d1737d91-config-volume\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869156 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c46l\" (UniqueName: \"kubernetes.io/projected/00e78d31-cbbb-4ee5-b687-14e01b2761df-kube-api-access-5c46l\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869173 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-mountpoint-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869221 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e08dbbbf-0bcb-42b8-9490-82434c360f01-cert\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.869245 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt4hb\" (UniqueName: \"kubernetes.io/projected/e08dbbbf-0bcb-42b8-9490-82434c360f01-kube-api-access-nt4hb\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.869862 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.369847668 +0000 UTC m=+143.217454089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.870370 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-socket-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.870478 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-plugins-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.870551 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-csi-data-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.871392 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-mountpoint-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.872609 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/00e78d31-cbbb-4ee5-b687-14e01b2761df-registration-dir\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.875037 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcdkb\" (UniqueName: \"kubernetes.io/projected/d2229fa4-dc3e-4783-8163-c535db52c796-kube-api-access-fcdkb\") pod \"migrator-59844c95c7-98qfq\" (UID: \"d2229fa4-dc3e-4783-8163-c535db52c796\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.876227 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-node-bootstrap-token\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.877027 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8b81aefd-2f13-4986-a339-2accc815813a-certs\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.877756 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2e3836a-3363-4908-8217-57a6d1737d91-config-volume\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.878936 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e08dbbbf-0bcb-42b8-9490-82434c360f01-cert\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.883334 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"]
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.885978 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a2e3836a-3363-4908-8217-57a6d1737d91-metrics-tls\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.903053 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l299t\" (UniqueName: \"kubernetes.io/projected/24691f90-319b-4ccf-95ff-8c085f85bef9-kube-api-access-l299t\") pod \"etcd-operator-b45778765-vklkx\" (UID: \"24691f90-319b-4ccf-95ff-8c085f85bef9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.908991 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpskb\" (UniqueName: \"kubernetes.io/projected/522dc69e-0abb-4f90-b033-979c2bd1ef9d-kube-api-access-cpskb\") pod \"authentication-operator-69f744f599-k2k77\" (UID: \"522dc69e-0abb-4f90-b033-979c2bd1ef9d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.939381 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxvrv\" (UniqueName: \"kubernetes.io/projected/d4458ea3-1882-4206-b1b2-885143795954-kube-api-access-kxvrv\") pod \"apiserver-76f77b778f-l52jr\" (UID: \"d4458ea3-1882-4206-b1b2-885143795954\") " pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.953970 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s99k\" (UniqueName: \"kubernetes.io/projected/d3360086-d51c-4932-8cfa-b166acba27dc-kube-api-access-2s99k\") pod \"ingress-operator-5b745b69d9-2ld6n\" (UID: \"d3360086-d51c-4932-8cfa-b166acba27dc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.973704 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.974399 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.474379525 +0000 UTC m=+143.321985946 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.974458 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:52 crc kubenswrapper[4647]: E1128 15:26:52.977365 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.477320238 +0000 UTC m=+143.324926659 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:52 crc kubenswrapper[4647]: I1128 15:26:52.983860 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s7pp\" (UniqueName: \"kubernetes.io/projected/a2609685-750c-4716-8f93-a37049032177-kube-api-access-6s7pp\") pod \"router-default-5444994796-t88ct\" (UID: \"a2609685-750c-4716-8f93-a37049032177\") " pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.001127 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6t8hg"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.008953 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvbqb\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-kube-api-access-rvbqb\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.026972 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8bf19ca3-646a-4484-b312-0ad04033cf51-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-md8hf\" (UID: \"8bf19ca3-646a-4484-b312-0ad04033cf51\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.032422 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z99bl"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.035690 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbkgw\" (UniqueName: \"kubernetes.io/projected/e47f3602-639f-459a-a9e1-55695c70bd96-kube-api-access-pbkgw\") pod \"dns-operator-744455d44c-ql2nw\" (UID: \"e47f3602-639f-459a-a9e1-55695c70bd96\") " pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.061445 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.065258 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs677\" (UniqueName: \"kubernetes.io/projected/dcf09f40-829e-444a-9ff3-2cd3ca8a72a5-kube-api-access-xs677\") pod \"openshift-config-operator-7777fb866f-ngdh7\" (UID: \"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.074108 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.079841 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.080286 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.58026563 +0000 UTC m=+143.427872051 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.080865 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.081714 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb6xf\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: W1128 15:26:53.097480 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b633443_1559_45ea_84d7_41ac090ad0a9.slice/crio-30157d87f6b3d6381dfa461ecadf06f746b26bc4ab9fb1b30d637bdf8fbcd22f WatchSource:0}: Error finding container 30157d87f6b3d6381dfa461ecadf06f746b26bc4ab9fb1b30d637bdf8fbcd22f: Status 404 returned error can't find the container with id 30157d87f6b3d6381dfa461ecadf06f746b26bc4ab9fb1b30d637bdf8fbcd22f
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.097578 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.097643 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.105840 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.114940 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.122806 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.124205 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt4hb\" (UniqueName: \"kubernetes.io/projected/e08dbbbf-0bcb-42b8-9490-82434c360f01-kube-api-access-nt4hb\") pod \"ingress-canary-tg7kf\" (UID: \"e08dbbbf-0bcb-42b8-9490-82434c360f01\") " pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.139509 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.146866 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.146897 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvk5h\" (UniqueName: \"kubernetes.io/projected/8b81aefd-2f13-4986-a339-2accc815813a-kube-api-access-pvk5h\") pod \"machine-config-server-xqbx6\" (UID: \"8b81aefd-2f13-4986-a339-2accc815813a\") " pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.161453 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22dnb\" (UniqueName: \"kubernetes.io/projected/a2e3836a-3363-4908-8217-57a6d1737d91-kube-api-access-22dnb\") pod \"dns-default-lkh97\" (UID: \"a2e3836a-3363-4908-8217-57a6d1737d91\") " pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.169223 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c46l\" (UniqueName: \"kubernetes.io/projected/00e78d31-cbbb-4ee5-b687-14e01b2761df-kube-api-access-5c46l\") pod \"csi-hostpathplugin-glkws\" (UID: \"00e78d31-cbbb-4ee5-b687-14e01b2761df\") " pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.173435 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.189744 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tg7kf"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.191028 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.194019 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.693999167 +0000 UTC m=+143.541605588 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.205948 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" event={"ID":"a9d10933-4352-41fb-94af-6239e1c90b98","Type":"ContainerStarted","Data":"5f346c700b74168634d36054758f3e4310d3afc4e6e525cc31aa5ad382c3efc9"}
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.209335 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.210691 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" event={"ID":"6b633443-1559-45ea-84d7-41ac090ad0a9","Type":"ContainerStarted","Data":"30157d87f6b3d6381dfa461ecadf06f746b26bc4ab9fb1b30d637bdf8fbcd22f"}
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.216790 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-glkws"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.226382 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lkh97"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.238738 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xqbx6"
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.294124 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.294682 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.794664975 +0000 UTC m=+143.642271396 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.339541 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
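The interleaved "operationExecutor.UnmountVolume started" / "operationExecutor.MountVolume started" pairs for the same PVC come from the volume manager's reconciler (the reconciler_common.go lines): on each pass it diffs the desired state of the world (the replacement image-registry pod, UID 6ab07d56-17ed-4c33-a43f-181e5ab30502) against the actual state (the volume still mounted for the departed pod, UID 8f668bae-612b-4b75-9490-919e737c6a3b) and emits one operation per difference. A rough Go sketch of that diff, with hypothetical types rather than kubelet's own:

```go
// Illustrative reconciler sketch: diff desired vs. actual volume state and
// emit mount/unmount operations, the pattern behind the
// "operationExecutor.MountVolume started" / "UnmountVolume started" lines.
// Hypothetical types; not kubelet source.
package main

import "fmt"

type volumeKey struct{ volume, podUID string }

func reconcile(desired, actual map[volumeKey]bool) {
	// Anything mounted but no longer desired gets an unmount operation.
	for k := range actual {
		if !desired[k] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", k.volume, k.podUID)
		}
	}
	// Anything desired but not yet mounted gets a mount operation.
	for k := range desired {
		if !actual[k] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n", k.volume, k.podUID)
		}
	}
}

func main() {
	pvc := "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	desired := map[volumeKey]bool{
		{pvc, "6ab07d56-17ed-4c33-a43f-181e5ab30502"}: true, // new image-registry pod
	}
	actual := map[volumeKey]bool{
		{pvc, "8f668bae-612b-4b75-9490-919e737c6a3b"}: true, // old pod's mount not yet torn down
	}
	reconcile(desired, actual) // both operations keep failing until the CSI driver registers
}
```

Because the loop re-derives the same two differences on every pass while the driver is unregistered, the pair of operations reappears in the log roughly every half second below.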
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.391618 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.399873 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.400259 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:53.900243941 +0000 UTC m=+143.747850362 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.442350 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-g75wv"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.501048 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.501782 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.001763533 +0000 UTC m=+143.849369954 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: W1128 15:26:53.570672 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod232ba417_fd39_4e22_b02f_5a911f9f8b33.slice/crio-ebcd90b05bd5a058feed1850e9adc8da86a88eebcf8d215df9299838481dc4cc WatchSource:0}: Error finding container ebcd90b05bd5a058feed1850e9adc8da86a88eebcf8d215df9299838481dc4cc: Status 404 returned error can't find the container with id ebcd90b05bd5a058feed1850e9adc8da86a88eebcf8d215df9299838481dc4cc
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.604278 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.604705 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.104689754 +0000 UTC m=+143.952296175 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: W1128 15:26:53.651862 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd750be5a_3ad8_4a6b_b959_479aacbb4f95.slice/crio-8cca5200891a459fe819bbc3e8126d7b7d10cb65138a18606921bf073c509464 WatchSource:0}: Error finding container 8cca5200891a459fe819bbc3e8126d7b7d10cb65138a18606921bf073c509464: Status 404 returned error can't find the container with id 8cca5200891a459fe819bbc3e8126d7b7d10cb65138a18606921bf073c509464
Nov 28 15:26:53 crc kubenswrapper[4647]: W1128 15:26:53.681964 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24db15e7_f30e_47c0_8227_a38041aea560.slice/crio-7b5707b4a6c513dfd8679ed15eac0780a83cb8e304a7449773f61de92abc8900 WatchSource:0}: Error finding container 7b5707b4a6c513dfd8679ed15eac0780a83cb8e304a7449773f61de92abc8900: Status 404 returned error can't find the container with id 7b5707b4a6c513dfd8679ed15eac0780a83cb8e304a7449773f61de92abc8900
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.706206 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.706826 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.206789943 +0000 UTC m=+144.054396364 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.811334 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.811814 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.311799683 +0000 UTC m=+144.159406104 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.913030 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.913168 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.4131407 +0000 UTC m=+144.260747131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.913627 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:53 crc kubenswrapper[4647]: E1128 15:26:53.914032 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.414022285 +0000 UTC m=+144.261628716 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.944096 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz"]
Nov 28 15:26:53 crc kubenswrapper[4647]: I1128 15:26:53.952221 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7"]
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.014837 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.014973 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.514951408 +0000 UTC m=+144.362557829 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.015500 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.016354 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.516332948 +0000 UTC m=+144.363939369 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.065865 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n7kp2"]
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.079885 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-8cp4n"]
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.085087 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"]
Nov 28 15:26:54 crc kubenswrapper[4647]: W1128 15:26:54.104245 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b81aefd_2f13_4986_a339_2accc815813a.slice/crio-84146409bb7234c3fc8a3d11fa534dce6efbbbf143d7fa36be39851628bb0c51 WatchSource:0}: Error finding container 84146409bb7234c3fc8a3d11fa534dce6efbbbf143d7fa36be39851628bb0c51: Status 404 returned error can't find the container with id 84146409bb7234c3fc8a3d11fa534dce6efbbbf143d7fa36be39851628bb0c51
Nov 28 15:26:54 crc kubenswrapper[4647]: W1128 15:26:54.106287 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc044b720_8bbd_4d48_a61b_d37188cfa478.slice/crio-c7e6695a355c40d8f54200ac7f42f0824d20e944aa341cb3fe64452d9e12a0ae WatchSource:0}: Error finding container c7e6695a355c40d8f54200ac7f42f0824d20e944aa341cb3fe64452d9e12a0ae: Status 404 returned error can't find the container with id c7e6695a355c40d8f54200ac7f42f0824d20e944aa341cb3fe64452d9e12a0ae
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.111457 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx"]
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.118775 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.119137 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.619101014 +0000 UTC m=+144.466707435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.119363 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.119969 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.619961589 +0000 UTC m=+144.467568010 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.222709 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.223282 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.723262081 +0000 UTC m=+144.570868502 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.244367 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" event={"ID":"24db15e7-f30e-47c0-8227-a38041aea560","Type":"ContainerStarted","Data":"7b5707b4a6c513dfd8679ed15eac0780a83cb8e304a7449773f61de92abc8900"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.257150 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" event={"ID":"ecd537d5-ea50-48b2-8565-283566427e38","Type":"ContainerStarted","Data":"236e2769c52be860b97953b0332228879a9967a0035ea0bd20c056c43213150c"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.262048 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" event={"ID":"0c3edb05-7e83-4753-bb17-23dc077830c4","Type":"ContainerStarted","Data":"24bb61e46a03446cb6e1e0be765dd7bb48266cefa3c76ba21fe9e56a6b078f79"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.270553 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" event={"ID":"229d7233-477b-42a8-81e3-5437db9d608e","Type":"ContainerStarted","Data":"96f0fa848d4493e0bdb7173f728e9efa344eef59b5781d502ab4af8d192c926f"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.324051 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.324546 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.824527335 +0000 UTC m=+144.672133756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.345486 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" event={"ID":"4408688c-7115-4338-9b06-e30b0ed30399","Type":"ContainerStarted","Data":"80d89c687e5a6efbb024ac038f19288ec006402b3c31e4bf3fb331250a455f8c"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.345537 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" event={"ID":"4408688c-7115-4338-9b06-e30b0ed30399","Type":"ContainerStarted","Data":"95da7e537ee981f77e9424a01c4888024065cf03ea2c37f6f62d230381a79247"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.378255 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" event={"ID":"c0e19f9c-f340-40c4-82c8-cebc432e7e15","Type":"ContainerStarted","Data":"47c03b593753e624ef1ad010549ddd4d988ccc794ca5b60066731b564627101e"}
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.428733 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.429625 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.929502975 +0000 UTC m=+144.777109396 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.429766 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.430393 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:54.930378839 +0000 UTC m=+144.777985260 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464153 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464185 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464197 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464208 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464220 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464231 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464560 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" event={"ID":"6b633443-1559-45ea-84d7-41ac090ad0a9","Type":"ContainerStarted","Data":"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.464993 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.466187 4647 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-vcv8n container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.466227 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.478204 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" event={"ID":"232ba417-fd39-4e22-b02f-5a911f9f8b33","Type":"ContainerStarted","Data":"ebcd90b05bd5a058feed1850e9adc8da86a88eebcf8d215df9299838481dc4cc"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.481697 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" 
event={"ID":"dc3703bb-9abe-4db2-8988-0698f9c97957","Type":"ContainerStarted","Data":"a393facf0fd4b126c025049c795da2f7bc7eeb9738fa5b9692d6386adf4b32ac"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.492096 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.499634 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" podStartSLOduration=122.499617828 podStartE2EDuration="2m2.499617828s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:54.498902838 +0000 UTC m=+144.346509259" watchObservedRunningTime="2025-11-28 15:26:54.499617828 +0000 UTC m=+144.347224239" Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.514829 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t88ct" event={"ID":"a2609685-750c-4716-8f93-a37049032177","Type":"ContainerStarted","Data":"66f217b405709f511505240d69d6109257560fc675f707807875d4d4f2af9b81"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.523071 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.541356 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.542013 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.041994507 +0000 UTC m=+144.889600928 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.566751 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" event={"ID":"c044b720-8bbd-4d48-a61b-d37188cfa478","Type":"ContainerStarted","Data":"c7e6695a355c40d8f54200ac7f42f0824d20e944aa341cb3fe64452d9e12a0ae"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.588946 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.602682 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xqbx6" event={"ID":"8b81aefd-2f13-4986-a339-2accc815813a","Type":"ContainerStarted","Data":"84146409bb7234c3fc8a3d11fa534dce6efbbbf143d7fa36be39851628bb0c51"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.616793 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" event={"ID":"d750be5a-3ad8-4a6b-b959-479aacbb4f95","Type":"ContainerStarted","Data":"8cca5200891a459fe819bbc3e8126d7b7d10cb65138a18606921bf073c509464"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.622288 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7z7l2"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.645687 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.646747 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.146722339 +0000 UTC m=+144.994328870 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.654564 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.679307 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" event={"ID":"a9d10933-4352-41fb-94af-6239e1c90b98","Type":"ContainerStarted","Data":"9f7f5ae4b6a72c47f75c9d4171867040321d412f6d5b2131dd2ff99d88844d7a"} Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.695597 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-ng9vc"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.702845 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5rsn5"] Nov 28 15:26:54 crc kubenswrapper[4647]: W1128 15:26:54.732514 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5e71bcd_07ad_4ec9_a9e1_aafd119c8fd6.slice/crio-9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a WatchSource:0}: Error finding container 9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a: Status 404 returned error can't find the container with id 9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.749178 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.750348 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.25031967 +0000 UTC m=+145.097926091 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: W1128 15:26:54.752041 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96144897_cea5_48a0_ad58_ccfa928aba03.slice/crio-9202dedae5f86040a0c28a873c50ce5c66b36fa4c7285a6cf99113d35c71377c WatchSource:0}: Error finding container 9202dedae5f86040a0c28a873c50ce5c66b36fa4c7285a6cf99113d35c71377c: Status 404 returned error can't find the container with id 9202dedae5f86040a0c28a873c50ce5c66b36fa4c7285a6cf99113d35c71377c Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.830131 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.854171 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.855656 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k2k77"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.864064 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.864384 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.364370866 +0000 UTC m=+145.211977287 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.888556 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl"] Nov 28 15:26:54 crc kubenswrapper[4647]: W1128 15:26:54.933534 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod522dc69e_0abb_4f90_b033_979c2bd1ef9d.slice/crio-e4c03e835e2ae16e58cc73e8f16adafbc5a3d064eaee084dcede187ba5185003 WatchSource:0}: Error finding container e4c03e835e2ae16e58cc73e8f16adafbc5a3d064eaee084dcede187ba5185003: Status 404 returned error can't find the container with id e4c03e835e2ae16e58cc73e8f16adafbc5a3d064eaee084dcede187ba5185003 Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.944170 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vklkx"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.944223 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tg7kf"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.946391 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ql2nw"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.948539 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-glkws"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.949347 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-l52jr"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.965297 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:54 crc kubenswrapper[4647]: E1128 15:26:54.965888 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.465867257 +0000 UTC m=+145.313473678 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.967461 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.976312 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"] Nov 28 15:26:54 crc kubenswrapper[4647]: I1128 15:26:54.993789 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lkh97"] Nov 28 15:26:55 crc kubenswrapper[4647]: W1128 15:26:55.061603 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4458ea3_1882_4206_b1b2_885143795954.slice/crio-c26f3513674a36dca16a8412791a6df97daa1b3abf8bc69038056b604a8268d5 WatchSource:0}: Error finding container c26f3513674a36dca16a8412791a6df97daa1b3abf8bc69038056b604a8268d5: Status 404 returned error can't find the container with id c26f3513674a36dca16a8412791a6df97daa1b3abf8bc69038056b604a8268d5 Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.068325 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.068923 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.568906621 +0000 UTC m=+145.416513042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.170289 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.171330 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 15:26:55.671304828 +0000 UTC m=+145.518911249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.271937 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.272462 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.772446779 +0000 UTC m=+145.620053200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.373035 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.373257 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.87323811 +0000 UTC m=+145.720844531 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.373754 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.374433 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.874397483 +0000 UTC m=+145.722003904 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.474652 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.475048 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.974999938 +0000 UTC m=+145.822606359 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.475702 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.476087 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:55.976070419 +0000 UTC m=+145.823676840 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.583636 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.583962 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.08394051 +0000 UTC m=+145.931546931 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.685548 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.685944 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.185928755 +0000 UTC m=+146.033535176 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.695517 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" event={"ID":"0c3edb05-7e83-4753-bb17-23dc077830c4","Type":"ContainerStarted","Data":"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.696502 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.703110 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" event={"ID":"8bf19ca3-646a-4484-b312-0ad04033cf51","Type":"ContainerStarted","Data":"32899332f36b5d6df19da80414af7079e90a39a6e02ea2144fd8f3a4655fbb0f"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.703982 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" event={"ID":"fd93397a-9a96-4902-990a-e7524fec3dd2","Type":"ContainerStarted","Data":"06f2694831e7ea06e6cfbd3c43c28f5bf9eac6e79c7dffa174afe2ee03611d3a"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.705650 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tg7kf" event={"ID":"e08dbbbf-0bcb-42b8-9490-82434c360f01","Type":"ContainerStarted","Data":"a08bffb324795172170cf21a64ff44097deb3cf8d679b3f1e7f37c6e87c6987b"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.707536 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" 
event={"ID":"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6","Type":"ContainerStarted","Data":"9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.715766 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.715864 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" event={"ID":"4a2a1306-2eff-4fc2-ac8c-8bb461353abd","Type":"ContainerStarted","Data":"56076e35522ef701764d9793e9ef06e12a4443ee12795e44834c0da68daf58e7"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.757949 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" event={"ID":"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1","Type":"ContainerStarted","Data":"71e75fdbded947adfd7452c046b8975de12b65d821ab26a82635ddcef45b42b2"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.795608 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.796436 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.296275776 +0000 UTC m=+146.143882197 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.842124 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" podStartSLOduration=123.842102173 podStartE2EDuration="2m3.842102173s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:55.841470185 +0000 UTC m=+145.689076606" watchObservedRunningTime="2025-11-28 15:26:55.842102173 +0000 UTC m=+145.689708594" Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.853016 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" event={"ID":"c7e82223-b810-4483-8505-82973a515276","Type":"ContainerStarted","Data":"b2b065f6370c5a4da42da59b05f5b8dcc07c1de64dfc1f4796bec48e4a728fbb"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.902395 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:55 crc kubenswrapper[4647]: E1128 15:26:55.903107 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.403092248 +0000 UTC m=+146.250698669 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.921381 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7z7l2" event={"ID":"5e711623-78c2-4e1f-a65f-1c4d871e3d21","Type":"ContainerStarted","Data":"9d08716e3a6426343734f07b58688c1b6bc355d5cf58da8c470947ec276bfc25"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.943272 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" event={"ID":"c044b720-8bbd-4d48-a61b-d37188cfa478","Type":"ContainerStarted","Data":"e2b5d24fbe26dfc40f15b2e0d166b282441aef8bd176e52a99c05a3589f56833"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.944115 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.964811 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" event={"ID":"d2229fa4-dc3e-4783-8163-c535db52c796","Type":"ContainerStarted","Data":"26912f88d3eda4fa9a44cd9223438689e8701806506ae477f2e8c179332a9c08"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.968937 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" event={"ID":"001333da-a6ba-41b2-a280-8b4825dd8a41","Type":"ContainerStarted","Data":"00ccb32a8c643b8eed40e04b4fd0d84967f5e4f4815c176979f7119026090f5c"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.980016 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" event={"ID":"4408688c-7115-4338-9b06-e30b0ed30399","Type":"ContainerStarted","Data":"b3c4d9e62817acb21d44cd21f8fde33ed81f527222e8d36f4800546ecda555ad"} Nov 28 15:26:55 crc kubenswrapper[4647]: I1128 15:26:55.997724 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" event={"ID":"d750be5a-3ad8-4a6b-b959-479aacbb4f95","Type":"ContainerStarted","Data":"67e50862bfb3706182b3f53500b31d61afc2ca75047a1cf0351e1186caf831dd"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.005577 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.006065 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.50604059 +0000 UTC m=+146.353647011 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.062705 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" event={"ID":"a9d10933-4352-41fb-94af-6239e1c90b98","Type":"ContainerStarted","Data":"5cfbb7938d1d6e4796f590ea97d273c3f0156bdd61016b35ca0c02083107d46e"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.091736 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" event={"ID":"d3360086-d51c-4932-8cfa-b166acba27dc","Type":"ContainerStarted","Data":"f720a9d01248722d048a60802bf2593a13bcfe72cd8490f22ac06fa4117d32fc"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.109375 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.110925 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.610911466 +0000 UTC m=+146.458517887 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.124471 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xqbx6" event={"ID":"8b81aefd-2f13-4986-a339-2accc815813a","Type":"ContainerStarted","Data":"285595675c01bd9276fb4d07eeadf77efd45be706a9a5ef680e5a2db0039149a"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.127924 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lkh97" event={"ID":"a2e3836a-3363-4908-8217-57a6d1737d91","Type":"ContainerStarted","Data":"fa51b319d0272688285828b91d685999bc174c6fea914a3271796735717bcd6b"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.145583 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-6t8hg" podStartSLOduration=124.145563637 podStartE2EDuration="2m4.145563637s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.145188636 +0000 UTC m=+145.992795057" watchObservedRunningTime="2025-11-28 15:26:56.145563637 +0000 UTC m=+145.993170058" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.146074 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" event={"ID":"c0e19f9c-f340-40c4-82c8-cebc432e7e15","Type":"ContainerStarted","Data":"172576ac89501f3618dc19466cda86e96f2105be333946a01838e548a5a7fb33"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.147562 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.148397 4647 patch_prober.go:28] interesting pod/console-operator-58897d9998-8cp4n container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.148489 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" podUID="c0e19f9c-f340-40c4-82c8-cebc432e7e15" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.198652 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" event={"ID":"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5","Type":"ContainerStarted","Data":"f6a7f9a9ba0b8c82c9692f8e60586faf5d8f16d28f1dc29eb0f65f75b50362bd"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.212167 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.213921 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.71389624 +0000 UTC m=+146.561502661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.221122 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" event={"ID":"24691f90-319b-4ccf-95ff-8c085f85bef9","Type":"ContainerStarted","Data":"de2a2fc19e4fae36c55dcbd5381a289bab97bbf9abac72f68e2f2ff124cc0a64"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.228813 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rt5dp" podStartSLOduration=124.22877669 podStartE2EDuration="2m4.22877669s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.221609048 +0000 UTC m=+146.069215469" watchObservedRunningTime="2025-11-28 15:26:56.22877669 +0000 UTC m=+146.076383111" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.261812 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" event={"ID":"d20da780-3761-4116-9d42-aa980d8ebbb8","Type":"ContainerStarted","Data":"5e9e9265f4a8d26f3992bf517fee3e28671672e45858f243cbf7c6e73a113664"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.295192 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" event={"ID":"fc997855-51c2-423d-876b-055ecf2df450","Type":"ContainerStarted","Data":"ea9cc56cdbfd48301e1701205522481c3036a89506fbf7fa83a775638a4d8084"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.314328 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.314782 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.814767563 +0000 UTC m=+146.662373984 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.318835 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-t88ct" event={"ID":"a2609685-750c-4716-8f93-a37049032177","Type":"ContainerStarted","Data":"de4ccfb3998b14794e5102e020859c6f5e818b5c96c9587a3316c497bd968cb3"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.319874 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" podStartSLOduration=124.319851227 podStartE2EDuration="2m4.319851227s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.273012792 +0000 UTC m=+146.120619213" watchObservedRunningTime="2025-11-28 15:26:56.319851227 +0000 UTC m=+146.167457648" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.320870 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" podStartSLOduration=125.32086601500001 podStartE2EDuration="2m5.320866015s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.319828936 +0000 UTC m=+146.167435357" watchObservedRunningTime="2025-11-28 15:26:56.320866015 +0000 UTC m=+146.168472436" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.331487 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" event={"ID":"b9890322-9811-4113-bc61-3306b91dfd5c","Type":"ContainerStarted","Data":"77cf1d32946fca8e6861359b687dae18f29ffe33470e14eecb602e3235f5c27e"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.336827 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" event={"ID":"24db15e7-f30e-47c0-8227-a38041aea560","Type":"ContainerStarted","Data":"3c2d3ab40ce613f345a983aaff1e4935187814ea14d501a1508a7395ce37ac99"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.339878 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-glkws" event={"ID":"00e78d31-cbbb-4ee5-b687-14e01b2761df","Type":"ContainerStarted","Data":"995091e0814b1d7ad845895e6075d953ac915dd11eb79d49e894e3f9dbc3a88a"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.365027 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fpqv9" podStartSLOduration=125.365000974 podStartE2EDuration="2m5.365000974s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.353087287 +0000 UTC m=+146.200693728" watchObservedRunningTime="2025-11-28 
15:26:56.365000974 +0000 UTC m=+146.212607395" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.365925 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" event={"ID":"3ffc95f7-12ba-4daf-b0ce-1236138a4844","Type":"ContainerStarted","Data":"0c5b3d4fead608da4242d85e61104df70bd7b2260309cb352001b1f10d825845"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.378124 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" event={"ID":"232ba417-fd39-4e22-b02f-5a911f9f8b33","Type":"ContainerStarted","Data":"e512d93dbf17edaf2c30aea2e271b6f8c229598fcd464e4b087337d4f9e47c24"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.387306 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-xqbx6" podStartSLOduration=7.387284974 podStartE2EDuration="7.387284974s" podCreationTimestamp="2025-11-28 15:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.38536921 +0000 UTC m=+146.232975631" watchObservedRunningTime="2025-11-28 15:26:56.387284974 +0000 UTC m=+146.234891395" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.388834 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" event={"ID":"dc3703bb-9abe-4db2-8988-0698f9c97957","Type":"ContainerStarted","Data":"e83ff2b72baf18f6cbabe085069847beca42e94b7aa842f3808c10ba3ac3d468"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.410338 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw" event={"ID":"e47f3602-639f-459a-a9e1-55695c70bd96","Type":"ContainerStarted","Data":"7848b9b8a2f08ce03e53f040295ed495887a3d9f227692777319c31396e3b3b0"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.420624 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.421338 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:56.921321267 +0000 UTC m=+146.768927688 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.432917 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" event={"ID":"522dc69e-0abb-4f90-b033-979c2bd1ef9d","Type":"ContainerStarted","Data":"e4c03e835e2ae16e58cc73e8f16adafbc5a3d064eaee084dcede187ba5185003"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.434534 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" event={"ID":"41268623-f4ac-491d-8948-c002f6eac77f","Type":"ContainerStarted","Data":"b61100aeeaa859f76760d6bc181d5804ade9c4904a42c7ee7ab421d8babcae04"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.449366 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qtsj" podStartSLOduration=124.449340509 podStartE2EDuration="2m4.449340509s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.409519663 +0000 UTC m=+146.257126084" watchObservedRunningTime="2025-11-28 15:26:56.449340509 +0000 UTC m=+146.296946930" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.453513 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z99bl" event={"ID":"96144897-cea5-48a0-ad58-ccfa928aba03","Type":"ContainerStarted","Data":"9202dedae5f86040a0c28a873c50ce5c66b36fa4c7285a6cf99113d35c71377c"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.457363 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" event={"ID":"d4458ea3-1882-4206-b1b2-885143795954","Type":"ContainerStarted","Data":"c26f3513674a36dca16a8412791a6df97daa1b3abf8bc69038056b604a8268d5"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.480997 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" event={"ID":"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c","Type":"ContainerStarted","Data":"270279d56db90979280fa3c29494f517e470c0b8a791786ec309606bf1e65af9"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.494102 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl" event={"ID":"00b15066-0185-4ee0-9e0d-ef9db2d4ab19","Type":"ContainerStarted","Data":"ac98d1087b20ffdbd42723010b8be0cd4d077f74cc51b1cf84aa6637e524ceca"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.521089 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-t88ct" podStartSLOduration=124.521064288 podStartE2EDuration="2m4.521064288s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 
15:26:56.519192705 +0000 UTC m=+146.366799136" watchObservedRunningTime="2025-11-28 15:26:56.521064288 +0000 UTC m=+146.368670709" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.521454 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wwsbr" podStartSLOduration=124.521449509 podStartE2EDuration="2m4.521449509s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:56.478656569 +0000 UTC m=+146.326262990" watchObservedRunningTime="2025-11-28 15:26:56.521449509 +0000 UTC m=+146.369055930" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.523355 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.523723 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.023709013 +0000 UTC m=+146.871315434 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.533191 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" event={"ID":"c8ca49c1-6905-4211-a0dd-642ffd28f28c","Type":"ContainerStarted","Data":"b501441110f5e1033ddf65fb9c2a0651dd977a0d38b7673f1793f96c160fd6e9"} Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.547396 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.628937 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.629382 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.129358532 +0000 UTC m=+146.976964953 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.730944 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.733046 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.233033874 +0000 UTC m=+147.080640295 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.832443 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.832841 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.332821927 +0000 UTC m=+147.180428348 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.934161 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:56 crc kubenswrapper[4647]: E1128 15:26:56.934532 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.434519804 +0000 UTC m=+147.282126225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.946335 4647 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2s6l7 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:26:56 crc kubenswrapper[4647]: I1128 15:26:56.946397 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" podUID="c044b720-8bbd-4d48-a61b-d37188cfa478" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.040544 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.040868 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.54081452 +0000 UTC m=+147.388420941 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.136513 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.143370 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.143799 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.643785923 +0000 UTC m=+147.491392334 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.157763 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:26:57 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:26:57 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:26:57 crc kubenswrapper[4647]: healthz check failed Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.158182 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.245056 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.245511 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.74547987 +0000 UTC m=+147.593086291 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.352580 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.353309 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.853286619 +0000 UTC m=+147.700893040 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.454134 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.454681 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:57.954663447 +0000 UTC m=+147.802269868 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.556036 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.556446 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.056431175 +0000 UTC m=+147.904037596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.623141 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" event={"ID":"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6","Type":"ContainerStarted","Data":"ad85127e24b570fc14376b2b3dcccc013586fbd7d0a56b910c1a95adb7ea43d2"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.642075 4647 generic.go:334] "Generic (PLEG): container finished" podID="fc997855-51c2-423d-876b-055ecf2df450" containerID="24c39cd2f850ec79279f03e1b37003af54b86eeab25bda1d525909871e0a2491" exitCode=0 Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.642835 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" event={"ID":"fc997855-51c2-423d-876b-055ecf2df450","Type":"ContainerDied","Data":"24c39cd2f850ec79279f03e1b37003af54b86eeab25bda1d525909871e0a2491"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.658578 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.659052 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.159029477 +0000 UTC m=+148.006635898 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.669815 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" event={"ID":"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c","Type":"ContainerStarted","Data":"adb7b47e17a3e80f14c936217d37ee0cb9df9d4d4466a6fb925ac374c1f6c920"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.708990 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw" event={"ID":"e47f3602-639f-459a-a9e1-55695c70bd96","Type":"ContainerStarted","Data":"bac85bb6166e388e4fbb8850eab3423cb1f8cb44d819f0bf3997cefbee265e2a"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.720181 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" event={"ID":"522dc69e-0abb-4f90-b033-979c2bd1ef9d","Type":"ContainerStarted","Data":"8e48d0b80e093cb6a9d31d435374efbf8a70e72a77765f1d94e88757cb5f9e0c"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.727044 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" event={"ID":"d2229fa4-dc3e-4783-8163-c535db52c796","Type":"ContainerStarted","Data":"648e367ff57061bb0ee6a84d67cfba37c1c7816741fccb8c31e561501c775381"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.749367 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" podStartSLOduration=126.749347782 podStartE2EDuration="2m6.749347782s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:57.746742068 +0000 UTC m=+147.594348489" watchObservedRunningTime="2025-11-28 15:26:57.749347782 +0000 UTC m=+147.596954203" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.751930 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" event={"ID":"ecd537d5-ea50-48b2-8565-283566427e38","Type":"ContainerStarted","Data":"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.753216 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.769178 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.771148 4647 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.271118247 +0000 UTC m=+148.118724668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.808673 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" podStartSLOduration=125.808655529 podStartE2EDuration="2m5.808655529s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:57.807870907 +0000 UTC m=+147.655477328" watchObservedRunningTime="2025-11-28 15:26:57.808655529 +0000 UTC m=+147.656261950" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.817301 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7z7l2" event={"ID":"5e711623-78c2-4e1f-a65f-1c4d871e3d21","Type":"ContainerStarted","Data":"63e6ac7a97c106e1173052443757328984b70cdd7e60c05ba619c5bdadf2667f"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.818090 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.823016 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.823075 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.835224 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.842662 4647 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8tcng container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.842745 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" podUID="b9890322-9811-4113-bc61-3306b91dfd5c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.853225 4647 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" event={"ID":"3ffc95f7-12ba-4daf-b0ce-1236138a4844","Type":"ContainerStarted","Data":"6f5f1994f6b0fa9b9c33d5081e4b919fb862b3efc1cc0dc33e615689bca1f311"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.858296 4647 generic.go:334] "Generic (PLEG): container finished" podID="d4458ea3-1882-4206-b1b2-885143795954" containerID="8da62a13ae4ac1b450a1b81de0c3f5301b72cc6a71ea39bf23c25fcb5575a5da" exitCode=0 Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.858372 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" event={"ID":"d4458ea3-1882-4206-b1b2-885143795954","Type":"ContainerDied","Data":"8da62a13ae4ac1b450a1b81de0c3f5301b72cc6a71ea39bf23c25fcb5575a5da"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.864955 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z99bl" event={"ID":"96144897-cea5-48a0-ad58-ccfa928aba03","Type":"ContainerStarted","Data":"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.873372 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.874588 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.374554753 +0000 UTC m=+148.222161174 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.874582 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.875768 4647 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5rsn5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.875829 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.930268 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" event={"ID":"229d7233-477b-42a8-81e3-5437db9d608e","Type":"ContainerStarted","Data":"f3aa30698c37dd0484dc67def2b285eaf45a73eba57d9a06fc8ca0f6326b144d"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.945890 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" event={"ID":"001333da-a6ba-41b2-a280-8b4825dd8a41","Type":"ContainerStarted","Data":"82bea7d1eb7af444d945b46361ebe620e79f6d52032c55b7649d54fa817638ec"} Nov 28 15:26:57 crc kubenswrapper[4647]: I1128 15:26:57.975862 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:57 crc kubenswrapper[4647]: E1128 15:26:57.979311 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.479285706 +0000 UTC m=+148.326892297 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.014754 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" event={"ID":"c8ca49c1-6905-4211-a0dd-642ffd28f28c","Type":"ContainerStarted","Data":"9ecab09a8a14ce54164013b62bccda3ad9bfe5506153013764ee89e0cf1ef301"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.027861 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-k2k77" podStartSLOduration=127.027838509 podStartE2EDuration="2m7.027838509s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.026278955 +0000 UTC m=+147.873885376" watchObservedRunningTime="2025-11-28 15:26:58.027838509 +0000 UTC m=+147.875444930" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.046171 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" event={"ID":"41268623-f4ac-491d-8948-c002f6eac77f","Type":"ContainerStarted","Data":"4942df7bbacf8c6c87fae9b4341a33745ef220570723377fc5d5490c98e7d01b"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.051822 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tg7kf" event={"ID":"e08dbbbf-0bcb-42b8-9490-82434c360f01","Type":"ContainerStarted","Data":"3d33cedc17400468874b2975bf3ac0747527cacf70c2e15804592165b3fbed22"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.077223 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.079608 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.579572653 +0000 UTC m=+148.427179074 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.087615 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.107269 4647 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-wx7sm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.107336 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" podUID="d20da780-3761-4116-9d42-aa980d8ebbb8" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.120309 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" event={"ID":"d3360086-d51c-4932-8cfa-b166acba27dc","Type":"ContainerStarted","Data":"3892cdc7af88476e51faa8858691d526e1473afad384a30dd81ede25c08634a2"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.121441 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" podStartSLOduration=126.121426526 podStartE2EDuration="2m6.121426526s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.117213877 +0000 UTC m=+147.964820298" watchObservedRunningTime="2025-11-28 15:26:58.121426526 +0000 UTC m=+147.969032947" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.151551 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:26:58 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:26:58 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:26:58 crc kubenswrapper[4647]: healthz check failed Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.154946 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.182218 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: 
\"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.184039 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.684012367 +0000 UTC m=+148.531618788 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.207162 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" podStartSLOduration=126.207134011 podStartE2EDuration="2m6.207134011s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.201919933 +0000 UTC m=+148.049526354" watchObservedRunningTime="2025-11-28 15:26:58.207134011 +0000 UTC m=+148.054740432" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.225547 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" event={"ID":"fd93397a-9a96-4902-990a-e7524fec3dd2","Type":"ContainerStarted","Data":"45e23d01e6b41a6574cbd7a51ebdbd66af0ac116853c080991c00f090e7efafa"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.226845 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.233788 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" event={"ID":"4a2a1306-2eff-4fc2-ac8c-8bb461353abd","Type":"ContainerStarted","Data":"d3f33c0a79e675da0fefc5755713f99b693cc85745cd5bc8e2f4a84aa91263a4"} Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.300362 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.302324 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.802296673 +0000 UTC m=+148.649903094 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.320047 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" podStartSLOduration=126.320020954 podStartE2EDuration="2m6.320020954s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.261342454 +0000 UTC m=+148.108948875" watchObservedRunningTime="2025-11-28 15:26:58.320020954 +0000 UTC m=+148.167627375" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.402722 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.403137 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:58.903120705 +0000 UTC m=+148.750727126 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.425487 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" podStartSLOduration=127.425467067 podStartE2EDuration="2m7.425467067s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.41745934 +0000 UTC m=+148.265065761" watchObservedRunningTime="2025-11-28 15:26:58.425467067 +0000 UTC m=+148.273073488" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.425921 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" podStartSLOduration=126.4259151 podStartE2EDuration="2m6.4259151s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.332998521 +0000 UTC m=+148.180604942" watchObservedRunningTime="2025-11-28 15:26:58.4259151 +0000 UTC m=+148.273521521" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.503877 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.504002 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.003970558 +0000 UTC m=+148.851576979 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.504268 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.504724 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.004716739 +0000 UTC m=+148.852323160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.526820 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7z7l2" podStartSLOduration=127.526793143 podStartE2EDuration="2m7.526793143s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.477844178 +0000 UTC m=+148.325450599" watchObservedRunningTime="2025-11-28 15:26:58.526793143 +0000 UTC m=+148.374399564" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.527893 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qqcpz" podStartSLOduration=127.527886334 podStartE2EDuration="2m7.527886334s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.526565967 +0000 UTC m=+148.374172378" watchObservedRunningTime="2025-11-28 15:26:58.527886334 +0000 UTC m=+148.375492755" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.551231 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-8cp4n" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.567350 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.605006 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.605151 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.605187 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.605259 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.605317 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.605609 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.105586592 +0000 UTC m=+148.953193013 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.615308 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.633787 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.635434 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.662400 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-l8fwr" podStartSLOduration=126.662382198 podStartE2EDuration="2m6.662382198s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.596248878 +0000 UTC m=+148.443855299" watchObservedRunningTime="2025-11-28 15:26:58.662382198 +0000 UTC m=+148.509988619" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.706456 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.707005 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.20698221 +0000 UTC m=+149.054588631 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.724077 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.725756 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f9nm6" podStartSLOduration=126.7257317 podStartE2EDuration="2m6.7257317s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.723685053 +0000 UTC m=+148.571291474" watchObservedRunningTime="2025-11-28 15:26:58.7257317 +0000 UTC m=+148.573338121" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.744129 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.753665 4647 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-n7kp2 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.19:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.753747 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" podUID="ecd537d5-ea50-48b2-8565-283566427e38" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.19:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.780032 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.810056 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.810528 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.310509879 +0000 UTC m=+149.158116300 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.863499 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-z99bl" podStartSLOduration=127.863470577 podStartE2EDuration="2m7.863470577s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.799105686 +0000 UTC m=+148.646712127" watchObservedRunningTime="2025-11-28 15:26:58.863470577 +0000 UTC m=+148.711076998" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.911267 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-tg7kf" podStartSLOduration=9.911240068 podStartE2EDuration="9.911240068s" podCreationTimestamp="2025-11-28 15:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.865310129 +0000 UTC m=+148.712916550" watchObservedRunningTime="2025-11-28 15:26:58.911240068 +0000 UTC m=+148.758846489" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.912825 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5gqvz" podStartSLOduration=126.912818733 podStartE2EDuration="2m6.912818733s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.907537283 +0000 UTC m=+148.755143694" watchObservedRunningTime="2025-11-28 15:26:58.912818733 +0000 UTC m=+148.760425154" Nov 28 15:26:58 crc kubenswrapper[4647]: I1128 15:26:58.916170 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:58 crc kubenswrapper[4647]: E1128 15:26:58.916660 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.41662883 +0000 UTC m=+149.264235251 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.017696 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.018288 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.518266025 +0000 UTC m=+149.365872436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.048734 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" podStartSLOduration=127.048715847 podStartE2EDuration="2m7.048715847s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:58.978789119 +0000 UTC m=+148.826395530" watchObservedRunningTime="2025-11-28 15:26:59.048715847 +0000 UTC m=+148.896322258" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.060211 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.121805 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.122346 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.622326149 +0000 UTC m=+149.469932570 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.137926 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:26:59 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:26:59 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:26:59 crc kubenswrapper[4647]: healthz check failed Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.138018 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.142231 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" podStartSLOduration=127.142208351 podStartE2EDuration="2m7.142208351s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.09303374 +0000 UTC m=+148.940640161" watchObservedRunningTime="2025-11-28 15:26:59.142208351 +0000 UTC m=+148.989814772" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.143307 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" podStartSLOduration=127.143303392 podStartE2EDuration="2m7.143303392s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.140185074 +0000 UTC m=+148.987791495" watchObservedRunningTime="2025-11-28 15:26:59.143303392 +0000 UTC m=+148.990909813" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.183222 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" podStartSLOduration=127.183198321 podStartE2EDuration="2m7.183198321s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.182055398 +0000 UTC m=+149.029661819" watchObservedRunningTime="2025-11-28 15:26:59.183198321 +0000 UTC m=+149.030804742" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.223728 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.223981 
4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.723944823 +0000 UTC m=+149.571551244 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.224053 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.224619 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.724609732 +0000 UTC m=+149.572216153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.261965 4647 generic.go:334] "Generic (PLEG): container finished" podID="dcf09f40-829e-444a-9ff3-2cd3ca8a72a5" containerID="77e79f08b918d92086194932f16cf884152d7c2f7243e779afbc1311587c46ea" exitCode=0 Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.262152 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" event={"ID":"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5","Type":"ContainerDied","Data":"77e79f08b918d92086194932f16cf884152d7c2f7243e779afbc1311587c46ea"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.314831 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" event={"ID":"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1","Type":"ContainerStarted","Data":"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.318453 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" event={"ID":"fc997855-51c2-423d-876b-055ecf2df450","Type":"ContainerStarted","Data":"da218709bc11e7ecb5a448046143a90652237637148ada86e6d57e73707c9038"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.320204 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" 
event={"ID":"b9890322-9811-4113-bc61-3306b91dfd5c","Type":"ContainerStarted","Data":"261950f53f69aaa8aa50b6229be5e36f651d0703a30f42c5338ece0ab560eedf"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.322117 4647 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8tcng container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.322164 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng" podUID="b9890322-9811-4113-bc61-3306b91dfd5c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.332167 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.333856 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.833820181 +0000 UTC m=+149.681426602 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.334600 4647 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5rsn5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.334655 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.347037 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-g75wv" event={"ID":"24db15e7-f30e-47c0-8227-a38041aea560","Type":"ContainerStarted","Data":"994d5415cf76e5fff2368f8baeab781fcfdb7e0489889db7e851cd163bc82057"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.366493 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pjcrl" 
event={"ID":"c8ca49c1-6905-4211-a0dd-642ffd28f28c","Type":"ContainerStarted","Data":"0fd972a2ddde6034cb802102fcdf7433596e8c63abbf577c6b168ab0790c9ba9"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.395282 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-ng9vc" podStartSLOduration=127.395260419 podStartE2EDuration="2m7.395260419s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.392730228 +0000 UTC m=+149.240336649" watchObservedRunningTime="2025-11-28 15:26:59.395260419 +0000 UTC m=+149.242866850" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.434056 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.435102 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:26:59.935062275 +0000 UTC m=+149.782668696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.435625 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vklkx" event={"ID":"24691f90-319b-4ccf-95ff-8c085f85bef9","Type":"ContainerStarted","Data":"d5114cea92e1989cd8b4c084b96a7f5af3ab43244088f70e192c095cea0e520c"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.464649 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-76c8x" event={"ID":"c7e82223-b810-4483-8505-82973a515276","Type":"ContainerStarted","Data":"dec0a6bab9c27e3e67297b331ec58717365f4cad9030505507061392a2082a4d"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.491311 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" event={"ID":"d20da780-3761-4116-9d42-aa980d8ebbb8","Type":"ContainerStarted","Data":"cbe5c4d81d92bec3a4b63d312ee1b4330b271e964f5653d474acf1233f35aecd"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.506631 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw" event={"ID":"e47f3602-639f-459a-a9e1-55695c70bd96","Type":"ContainerStarted","Data":"f6efdbecc1cabf91c06595d49345abb40cfd4fcc5b6d49db0cfed4c2b866b137"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.521762 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" event={"ID":"d2229fa4-dc3e-4783-8163-c535db52c796","Type":"ContainerStarted","Data":"c645abcf40c2af720c76eacb8babedabe287f6d784e8e1db782304464654c8c2"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.537728 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.541275 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.041251359 +0000 UTC m=+149.888857780 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.548901 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl" event={"ID":"00b15066-0185-4ee0-9e0d-ef9db2d4ab19","Type":"ContainerStarted","Data":"167c4c3acfeff36f37e6d0454b96d394ca8c914cbd3ad7039b3c643b4bd1870c"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.548965 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl" event={"ID":"00b15066-0185-4ee0-9e0d-ef9db2d4ab19","Type":"ContainerStarted","Data":"440f15aa1d0fe983e283aa423e5f9199a789d102a62838a10b31a4fe56abed95"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.571227 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-md8hf" event={"ID":"8bf19ca3-646a-4484-b312-0ad04033cf51","Type":"ContainerStarted","Data":"b9a088d6595c09738c0cc9e2c68a8a1a55316fef0bf5414282a0c05dd5434328"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.595130 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" event={"ID":"fd93397a-9a96-4902-990a-e7524fec3dd2","Type":"ContainerStarted","Data":"0fd95d60487cc8c3c648902ca136b86c8b31506d06d686f9b739ae0b1de107bf"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.597502 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wx7sm" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.610880 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" event={"ID":"8b1d5eba-0e82-40a2-8bb6-3d6ddf56d06c","Type":"ContainerStarted","Data":"b7b3b1a0ff7dd9443e3760155bf35f7baf0c9a6c01a23600b4fbdbc3dc1172a5"} Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.616776 4647 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.616849 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.618202 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s" podStartSLOduration=127.618189815 podStartE2EDuration="2m7.618189815s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.615979823 +0000 UTC m=+149.463586244" watchObservedRunningTime="2025-11-28 15:26:59.618189815 +0000 UTC m=+149.465796236" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.642608 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.651461 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.655647 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.155626484 +0000 UTC m=+150.003232905 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.749363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.749624 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.249586542 +0000 UTC m=+150.097192963 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.750012 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.750462 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.250446426 +0000 UTC m=+150.098052847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.854293 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.855003 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.354982983 +0000 UTC m=+150.202589404 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.955664 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-98qfq" podStartSLOduration=127.95563066 podStartE2EDuration="2m7.95563066s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.79334776 +0000 UTC m=+149.640954181" watchObservedRunningTime="2025-11-28 15:26:59.95563066 +0000 UTC m=+149.803237081" Nov 28 15:26:59 crc kubenswrapper[4647]: I1128 15:26:59.962201 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:26:59 crc kubenswrapper[4647]: E1128 15:26:59.962627 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.462611868 +0000 UTC m=+150.310218279 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.040333 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-4nxtl" podStartSLOduration=129.040308856 podStartE2EDuration="2m9.040308856s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:26:59.958265705 +0000 UTC m=+149.805872126" watchObservedRunningTime="2025-11-28 15:27:00.040308856 +0000 UTC m=+149.887915277" Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.063227 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.063614 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.563577194 +0000 UTC m=+150.411183605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.063971 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.064315 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.564295344 +0000 UTC m=+150.411901765 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.117557 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sfpgz" podStartSLOduration=128.11753494 podStartE2EDuration="2m8.11753494s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:00.040677346 +0000 UTC m=+149.888283767" watchObservedRunningTime="2025-11-28 15:27:00.11753494 +0000 UTC m=+149.965141361" Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.130584 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:00 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:00 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:00 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.130642 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.167671 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.168198 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.668175033 +0000 UTC m=+150.515781454 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.184761 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-ql2nw" podStartSLOduration=128.184735781 podStartE2EDuration="2m8.184735781s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:00.12105726 +0000 UTC m=+149.968663681" watchObservedRunningTime="2025-11-28 15:27:00.184735781 +0000 UTC m=+150.032342202" Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.275670 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.276101 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.776084255 +0000 UTC m=+150.623690676 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.377091 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.377785 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.877765021 +0000 UTC m=+150.725371442 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.479126 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.479966 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:00.979948022 +0000 UTC m=+150.827554443 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.584006 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.584332 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.084313054 +0000 UTC m=+150.931919485 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.684202 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lkh97" event={"ID":"a2e3836a-3363-4908-8217-57a6d1737d91","Type":"ContainerStarted","Data":"4808f6fe29ce5b650fbf0e4801c645f58dd36bd3ec8c790d20a1227e2c304054"} Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.684277 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lkh97" event={"ID":"a2e3836a-3363-4908-8217-57a6d1737d91","Type":"ContainerStarted","Data":"0f87686c43e82a6a70d99655ba594a25e664c7f6c044f9783d0557150522374a"} Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.685363 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-lkh97" Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.688270 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.688703 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.188689526 +0000 UTC m=+151.036295947 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.704386 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" event={"ID":"dcf09f40-829e-444a-9ff3-2cd3ca8a72a5","Type":"ContainerStarted","Data":"605a4330eb04011ef8803c7c8cc94ec0f36f885071b380bbcd7401c2bf3248dc"}
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.705231 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7"
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.715296 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-glkws" event={"ID":"00e78d31-cbbb-4ee5-b687-14e01b2761df","Type":"ContainerStarted","Data":"cf9cd67c784a4bc513bac3c6a6595a6641b6916d885c3dfacc96da76d881f509"}
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.751142 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" event={"ID":"d4458ea3-1882-4206-b1b2-885143795954","Type":"ContainerStarted","Data":"7eccb25c1c03b0b575e94a2f7474694e4460781698d8491021335979dffb3f44"}
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.751188 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" event={"ID":"d4458ea3-1882-4206-b1b2-885143795954","Type":"ContainerStarted","Data":"a2fa487908b6daa8812adc21b112bbad43cafaf982e4e0ed4530c9a1004826ee"}
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.789873 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" event={"ID":"d3360086-d51c-4932-8cfa-b166acba27dc","Type":"ContainerStarted","Data":"58edadd909c732b8ebb27ef11e646db5f64da38ea53fb18112ba3953b49e6750"}
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.794579 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.795897 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.295877758 +0000 UTC m=+151.143484179 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.799049 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body=
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.799118 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused"
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.799749 4647 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5rsn5 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.799767 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.14:8080/healthz\": dial tcp 10.217.0.14:8080: connect: connection refused"
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.806823 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-lkh97" podStartSLOduration=11.806802047 podStartE2EDuration="11.806802047s" podCreationTimestamp="2025-11-28 15:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:00.804576884 +0000 UTC m=+150.652183305" watchObservedRunningTime="2025-11-28 15:27:00.806802047 +0000 UTC m=+150.654408468"
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.900286 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:00 crc kubenswrapper[4647]: E1128 15:27:00.902033 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.402011321 +0000 UTC m=+151.249617742 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:00 crc kubenswrapper[4647]: I1128 15:27:00.981749 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tcng"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.002036 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.002511 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.502484793 +0000 UTC m=+151.350091214 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.006594 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" podStartSLOduration=130.006583819 podStartE2EDuration="2m10.006583819s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:01.003493081 +0000 UTC m=+150.851099502" watchObservedRunningTime="2025-11-28 15:27:01.006583819 +0000 UTC m=+150.854190240"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.105497 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.105975 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.605958429 +0000 UTC m=+151.453564850 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.139621 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 15:27:01 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld
Nov 28 15:27:01 crc kubenswrapper[4647]: [+]process-running ok
Nov 28 15:27:01 crc kubenswrapper[4647]: healthz check failed
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.139682 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.207187 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.208924 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.70886993 +0000 UTC m=+151.556476351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.209134 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.209640 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.709632491 +0000 UTC m=+151.557238912 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.285552 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2ld6n" podStartSLOduration=129.285531238 podStartE2EDuration="2m9.285531238s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:01.231776898 +0000 UTC m=+151.079383319" watchObservedRunningTime="2025-11-28 15:27:01.285531238 +0000 UTC m=+151.133137659"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.286145 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" podStartSLOduration=130.286140625 podStartE2EDuration="2m10.286140625s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:01.282725309 +0000 UTC m=+151.130331730" watchObservedRunningTime="2025-11-28 15:27:01.286140625 +0000 UTC m=+151.133747046"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.310020 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.310571 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.810547896 +0000 UTC m=+151.658154327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.417740 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.418124 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:01.918107208 +0000 UTC m=+151.765713629 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.432241 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.433295 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.446983 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.446986 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.465427 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.520812 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.521023 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.521075 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.521213 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.021189174 +0000 UTC m=+151.868795605 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.622131 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.622223 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.622262 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.622342 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.622922 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.122911651 +0000 UTC m=+151.970518072 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.669497 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.724130 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.724333 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.22430287 +0000 UTC m=+152.071909291 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.724533 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.724874 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.224861345 +0000 UTC m=+152.072467766 (durationBeforeRetry 500ms). 
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.793129 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fxzbs"]
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.794476 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.794687 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.819651 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.825643 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body=
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.825696 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.826661 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body=
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.826684 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.827145 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.827471 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.827539 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqxl4\" (UniqueName: \"kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.827577 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.827675 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.327660213 +0000 UTC m=+152.175266634 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.841847 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.841891 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.856041 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"b49821578faf5a9d6a3018e603dae18d3507e5c832ef72ec8b37dc517de158ff"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.856483 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"6160b951dd3507ba7057ebfa407e160c7030febfccf93e793057accaf7355cda"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.857444 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.861693 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxzbs"]
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.867434 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.888755 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9fe734a2c235e70152c0a53f2ba7a3fab602cbc0ccb7351ea4bb792f2a6e6b8d"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.888819 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9b6181c9db4615a5e8cfd2e70445748453858925d5be2787ddb59be22f887d1c"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.917994 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"0fb71790bbfcd830017f6864ba5b7f596cb0db7ad78ec2c773952c12b3f97bc7"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.918053 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"847666e4d5865e9160bd473dac2677731a15f020d6888275dd4fd74b2390989d"}
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.931343 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqxl4\" (UniqueName: \"kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.931805 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9fl4s"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.934388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.934563 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.934811 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.935289 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: E1128 15:27:01.937807 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.437796869 +0000 UTC m=+152.285403290 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.938576 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.981176 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gwwpv"]
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.988728 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:01 crc kubenswrapper[4647]: I1128 15:27:01.994648 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.011311 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqxl4\" (UniqueName: \"kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4\") pod \"community-operators-fxzbs\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.027981 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gwwpv"]
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.043650 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.044057 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tckdm\" (UniqueName: \"kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.044097 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.044123 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.044234 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.544214629 +0000 UTC m=+152.391821050 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.134089 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 15:27:02 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld
Nov 28 15:27:02 crc kubenswrapper[4647]: [+]process-running ok
Nov 28 15:27:02 crc kubenswrapper[4647]: healthz check failed
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.134168 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.145833 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.145902 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tckdm\" (UniqueName: \"kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.145930 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.145969 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.146524 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.146844 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.646815071 +0000 UTC m=+152.494421482 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.147362 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.184074 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.187199 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.188687 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.212332 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tckdm\" (UniqueName: \"kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm\") pod \"certified-operators-gwwpv\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.247181 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.249265 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.249555 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.749517616 +0000 UTC m=+152.597124037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.266111 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzsl6\" (UniqueName: \"kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.266249 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.266374 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.266448 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.266839 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.766825156 +0000 UTC m=+152.614431577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.353855 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.368366 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.368775 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzsl6\" (UniqueName: \"kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.368896 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.369022 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.369541 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.369680 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.869660425 +0000 UTC m=+152.717266846 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.370251 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.388487 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"]
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.389816 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.414087 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzsl6\" (UniqueName: \"kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6\") pod \"community-operators-gmr25\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") " pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.443116 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"]
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.471342 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.471395 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.471844 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:02.971816274 +0000 UTC m=+152.819422695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.471985 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b887v\" (UniqueName: \"kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.472058 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.540476 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.559275 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.573134 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.573461 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.073437839 +0000 UTC m=+152.921044260 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.573579 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.573619 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.573658 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b887v\" (UniqueName: \"kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.573735 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.574155 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.574381 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.574678 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.074656273 +0000 UTC m=+152.922262684 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.630161 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b887v\" (UniqueName: \"kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v\") pod \"certified-operators-g4rzr\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.676916 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.678793 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.178775319 +0000 UTC m=+153.026381740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.726604 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g4rzr"
Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.785457 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.785882 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.285868328 +0000 UTC m=+153.133474749 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.887623 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.888004 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.387986766 +0000 UTC m=+153.235593187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.947358 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-glkws" event={"ID":"00e78d31-cbbb-4ee5-b687-14e01b2761df","Type":"ContainerStarted","Data":"056abba6589a1c5f0175792b8a99385d035cd9b9d278b86ea5917bbdf6bf6615"} Nov 28 15:27:02 crc kubenswrapper[4647]: I1128 15:27:02.990848 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:02 crc kubenswrapper[4647]: E1128 15:27:02.991271 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.491230467 +0000 UTC m=+153.338836898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.034396 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.034467 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.036902 4647 patch_prober.go:28] interesting pod/console-f9d7485db-z99bl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.036989 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z99bl" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.092484 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.092702 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.592664836 +0000 UTC m=+153.440271257 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.092913 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.093318 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
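[Editor's note, not part of the captured log] Every mount and unmount failure above has the same root cause: kubelet cannot build a CSI client until the named driver has finished plugin registration, and each failed operation is re-queued no earlier than the logged durationBeforeRetry of 500ms. A minimal Go sketch of that lookup gate; driverRegistry and clientFor are invented names for illustration, not kubelet's actual types:

package main

import (
	"fmt"
	"sync"
)

// driverRegistry stands in for kubelet's in-memory map of registered CSI
// plugins; a driver only appears here after the registration handshake.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> unix socket endpoint
}

// clientFor fails exactly the way the log does while the hostpath
// provisioner is still starting: the name is simply not in the map yet.
func (r *driverRegistry) clientFor(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]string{}}
	if _, err := reg.clientFor("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println(err) // matches the error text repeated above
	}
	reg.drivers["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
	ep, _ := reg.clientFor("kubevirt.io.hostpath-provisioner")
	fmt.Println("endpoint:", ep)
}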
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.117039 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.118002 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-l52jr"
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.125676 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-t88ct"
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.128430 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 15:27:03 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld
Nov 28 15:27:03 crc kubenswrapper[4647]: [+]process-running ok
Nov 28 15:27:03 crc kubenswrapper[4647]: healthz check failed
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.128871 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.194708 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.694682732 +0000 UTC m=+153.542289153 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.194583 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.195093 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.195737 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.695713371 +0000 UTC m=+153.543319792 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.219093 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxzbs"]
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.297821 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.298276 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.798243331 +0000 UTC m=+153.645849752 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.345343 4647 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-ngdh7 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.345451 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" podUID="dcf09f40-829e-444a-9ff3-2cd3ca8a72a5" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.346823 4647 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-ngdh7 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.346903 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" podUID="dcf09f40-829e-444a-9ff3-2cd3ca8a72a5" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.399183 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.399653 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:03.899637199 +0000 UTC m=+153.747243620 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
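[Editor's note, not part of the captured log] The probe failures interleaved with the volume retries are independent of the CSI problem: the console container refuses connections, the router answers 500, and the config-operator times out. All three are plain HTTP GET checks run by kubelet's prober. A sketch of that check under the assumption of a bare net/http client; the URL in main is the console endpoint quoted above, used purely as an illustration:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeHTTP mirrors the shape of kubelet's HTTP probe: a GET with a hard
// client timeout. A refused connection (console), a 500 answer (router),
// and a timeout (config-operator) all surface as failures here.
func probeHTTP(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused" or a Client.Timeout error
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeHTTP("https://10.217.0.28:8443/health", 1*time.Second); err != nil {
		fmt.Println("probe failed:", err)
	}
}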
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.476372 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.502162 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.502655 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.002632573 +0000 UTC m=+153.850238994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.607545 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.607971 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.107942742 +0000 UTC m=+153.955549163 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.715796 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.716231 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.216215414 +0000 UTC m=+154.063821835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.766861 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.826866 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.827287 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.327272766 +0000 UTC m=+154.174879187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.893663 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gwwpv"]
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.931323 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:03 crc kubenswrapper[4647]: E1128 15:27:03.932221 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.432196014 +0000 UTC m=+154.279802435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.967522 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerStarted","Data":"023f2905ece168b649dbfb55707c4f07f172a36cb772c91a0ce319f22d1511de"}
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.968439 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ab07a51d-c81c-475b-8472-4bcadd4cdc03","Type":"ContainerStarted","Data":"52de4fbb7606741c3e4885cfdf2f7a4e7e7aa789006e2dd1914c2b5f468aa285"}
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.969453 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerStarted","Data":"c436ae75f7a4302d75440096ac04355332f098fe17689544daf49bc871c8068d"}
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.970870 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerStarted","Data":"ed2b5a430dc0494ae1acddec32a8069aadce04f7590aefcb4a2f62ab4f02b760"}
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.971929 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gbwhg"]
Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.973524 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:03 crc kubenswrapper[4647]: I1128 15:27:03.977882 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.002592 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbwhg"] Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.033445 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwpgj\" (UniqueName: \"kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.033608 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.033690 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.033764 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.061235 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.561214723 +0000 UTC m=+154.408821144 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.119741 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"] Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.133573 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:04 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:04 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:04 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.134020 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.135334 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.135543 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.635517695 +0000 UTC m=+154.483124126 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.135979 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwpgj\" (UniqueName: \"kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.136100 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.136187 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.136270 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.136684 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.636675458 +0000 UTC m=+154.484281879 (durationBeforeRetry 500ms). 
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.137445 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.137534 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.167635 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwpgj\" (UniqueName: \"kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj\") pod \"redhat-marketplace-gbwhg\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") " pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.184188 4647 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.237123 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.237574 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.737551991 +0000 UTC m=+154.585158412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.318705 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.338513 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.339044 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.839020582 +0000 UTC m=+154.686627003 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.364384 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"]
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.365592 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.388191 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"]
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.439403 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.439691 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.939639728 +0000 UTC m=+154.787246149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.439977 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.440084 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.440204 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8m9m\" (UniqueName: \"kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.440343 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.440377 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:04.940368698 +0000 UTC m=+154.787975119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
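[Editor's note, not part of the captured log] The plugin_watcher.go entry at 15:27:04.184188 is the turning point: the hostpath provisioner's registration socket has appeared under /var/lib/kubelet/plugins_registry, so kubelet records it in its desired-state cache and will shortly run the registration handshake. The real watcher is event-driven; this is only a simplified polling sketch of the same bookkeeping, with invented names:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// scanRegistry adds any "*-reg.sock" path it has not seen before to the
// desired-state cache, the step the plugin_watcher.go entry above logs.
func scanRegistry(dir string, seen map[string]time.Time, add func(string)) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		path := filepath.Join(dir, e.Name())
		if strings.HasSuffix(path, "-reg.sock") {
			if _, ok := seen[path]; !ok {
				seen[path] = time.Now()
				add(path)
			}
		}
	}
	return nil
}

func main() {
	seen := map[string]time.Time{}
	_ = scanRegistry("/var/lib/kubelet/plugins_registry", seen, func(p string) {
		fmt.Println("adding socket path to desired state cache:", p)
	})
}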
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.515549 4647 patch_prober.go:28] interesting pod/apiserver-76f77b778f-l52jr container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]log ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]etcd ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/max-in-flight-filter ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 28 15:27:04 crc kubenswrapper[4647]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [-]poststarthook/openshift.io-startinformers failed: reason withheld
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 28 15:27:04 crc kubenswrapper[4647]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 28 15:27:04 crc kubenswrapper[4647]: livez check failed
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.515669 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" podUID="d4458ea3-1882-4206-b1b2-885143795954" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.545136 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.545477 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.545538 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.545571 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8m9m\" (UniqueName: \"kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.546141 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:05.04612513 +0000 UTC m=+154.893731551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.546607 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.546860 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.593946 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8m9m\" (UniqueName: \"kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m\") pod \"redhat-marketplace-cf7jh\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") " pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.647557 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.648093 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:05.148076394 +0000 UTC m=+154.995682815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.673328 4647 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T15:27:04.184223783Z","Handler":null,"Name":""}
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.678929 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.748470 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.748949 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:05.248912655 +0000 UTC m=+155.096519076 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.851816 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.852140 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:27:05.352123155 +0000 UTC m=+155.199729576 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-76zr7" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
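[Editor's note, not part of the captured log] RegisterPlugin at 15:27:04.673328 kicks off the handshake that csi_plugin.go completes a moment later: kubelet validates the driver name and advertised versions (1.0.0 here), then records the endpoint, which finally unblocks the pending MountDevice and TearDown retries. The real exchange is a gRPC registration service; the sketch below reduces it to plain structs with invented names:

package main

import "fmt"

// pluginInfo holds what kubelet learns over the registration socket.
type pluginInfo struct {
	Name              string
	Endpoint          string
	SupportedVersions []string
}

// validateAndRegister mirrors the two csi_plugin.go steps logged below:
// "Trying to validate a new CSI Driver ..." then "Register new plugin ...".
func validateAndRegister(info pluginInfo, registry map[string]string) error {
	if info.Name == "" || len(info.SupportedVersions) == 0 {
		return fmt.Errorf("invalid plugin registration: %+v", info)
	}
	registry[info.Name] = info.Endpoint
	return nil
}

func main() {
	registry := map[string]string{}
	err := validateAndRegister(pluginInfo{
		Name:              "kubevirt.io.hostpath-provisioner",
		Endpoint:          "/var/lib/kubelet/plugins/csi-hostpath/csi.sock",
		SupportedVersions: []string{"1.0.0"},
	}, registry)
	fmt.Println(err, registry)
}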
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.953063 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:27:04 crc kubenswrapper[4647]: E1128 15:27:04.953598 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:27:05.453578385 +0000 UTC m=+155.301184806 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.969561 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bfrkx"]
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.969712 4647 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.969759 4647 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.970806 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:27:04 crc kubenswrapper[4647]: I1128 15:27:04.983133 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.002963 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfrkx"]
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.031668 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbwhg"]
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.051230 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-glkws" event={"ID":"00e78d31-cbbb-4ee5-b687-14e01b2761df","Type":"ContainerStarted","Data":"eda722d21232c5349d3cacbabb8f5fb573599ace220746b7d3058046c95244c9"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.055808 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7"
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.055915 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.055973 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.056084 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psmsp\" (UniqueName: \"kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.058990 4647 generic.go:334] "Generic (PLEG): container finished" podID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerID="8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1" exitCode=0
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.059092 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerDied","Data":"8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.061666 4647 generic.go:334] "Generic (PLEG): container finished" podID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerID="ddc6eda08d8d35995477efb2b6ebb867fb4c7c007d16edfc9e84cab43fad2df6" exitCode=0
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.061711 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerDied","Data":"ddc6eda08d8d35995477efb2b6ebb867fb4c7c007d16edfc9e84cab43fad2df6"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.063653 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ab07a51d-c81c-475b-8472-4bcadd4cdc03","Type":"ContainerStarted","Data":"a66dddb49b1a430f2708ec6acc5815055b75bad83a6bffc2c3fe3392d0c4ce51"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.065233 4647 generic.go:334] "Generic (PLEG): container finished" podID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerID="370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6" exitCode=0
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.065338 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerDied","Data":"370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.065356 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerStarted","Data":"5fe894ed6a6b2c4040fb2a2e98064d20c86ed33b5000fb4f52e6416666968826"}
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.083999 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.100628 4647 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.101106 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.113171 4647 generic.go:334] "Generic (PLEG): container finished" podID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerID="13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059" exitCode=0 Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.113227 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerDied","Data":"13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059"} Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.138642 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:05 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:05 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:05 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.138722 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.157789 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.157892 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.157919 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psmsp\" (UniqueName: \"kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.158654 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " 
pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.160439 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.205041 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psmsp\" (UniqueName: \"kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp\") pod \"redhat-operators-bfrkx\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.226191 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=4.2261709960000005 podStartE2EDuration="4.226170996s" podCreationTimestamp="2025-11-28 15:27:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:05.225534238 +0000 UTC m=+155.073140659" watchObservedRunningTime="2025-11-28 15:27:05.226170996 +0000 UTC m=+155.073777417" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.322156 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-76zr7\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.346862 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.360094 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.367239 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ngdh7" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.386805 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"] Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.447340 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.458214 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.463587 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"] Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.504905 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmwqh\" (UniqueName: \"kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.505016 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.505133 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.560315 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.561336 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.576974 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.577603 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.583860 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.610777 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.610864 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.610905 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " 
pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.610926 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmwqh\" (UniqueName: \"kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.610949 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.611430 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.612056 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.637170 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmwqh\" (UniqueName: \"kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh\") pod \"redhat-operators-zlhvv\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") " pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.711950 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.712992 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.713093 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.713772 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"] Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.754456 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.761969 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.823588 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zlhvv" Nov 28 15:27:05 crc kubenswrapper[4647]: I1128 15:27:05.894805 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.018365 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bfrkx"] Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.101434 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"] Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.190331 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"] Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.215950 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:06 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:06 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:06 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.216016 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.257142 4647 generic.go:334] "Generic (PLEG): container finished" podID="ab07a51d-c81c-475b-8472-4bcadd4cdc03" containerID="a66dddb49b1a430f2708ec6acc5815055b75bad83a6bffc2c3fe3392d0c4ce51" exitCode=0 Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.257308 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ab07a51d-c81c-475b-8472-4bcadd4cdc03","Type":"ContainerDied","Data":"a66dddb49b1a430f2708ec6acc5815055b75bad83a6bffc2c3fe3392d0c4ce51"} Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.259891 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerStarted","Data":"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d"} Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.259956 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" 
event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerStarted","Data":"2a3d68f407aabc106760d9865d5805064fd13dde79aecee69667f7fc702e23f4"} Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.266226 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerStarted","Data":"6e144a375f84503a3acaffb7e39751b7be972842682bec900a90577f2f5a3cb0"} Nov 28 15:27:06 crc kubenswrapper[4647]: W1128 15:27:06.271718 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod019fb61c_0e19_4aa4_b042_970be7ad9119.slice/crio-b0933ae08e61952a9edf48915e1223858858b7175535b9c137a93fcb03ea98b5 WatchSource:0}: Error finding container b0933ae08e61952a9edf48915e1223858858b7175535b9c137a93fcb03ea98b5: Status 404 returned error can't find the container with id b0933ae08e61952a9edf48915e1223858858b7175535b9c137a93fcb03ea98b5 Nov 28 15:27:06 crc kubenswrapper[4647]: W1128 15:27:06.274122 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ab07d56_17ed_4c33_a43f_181e5ab30502.slice/crio-dea3637b3132403e676ef88703c4214bf74702c6f014e973eadbc1b60b6232f2 WatchSource:0}: Error finding container dea3637b3132403e676ef88703c4214bf74702c6f014e973eadbc1b60b6232f2: Status 404 returned error can't find the container with id dea3637b3132403e676ef88703c4214bf74702c6f014e973eadbc1b60b6232f2 Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.401333 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 15:27:06 crc kubenswrapper[4647]: I1128 15:27:06.534148 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.140889 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:07 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:07 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:07 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.141347 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.341312 4647 generic.go:334] "Generic (PLEG): container finished" podID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerID="2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d" exitCode=0 Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.341639 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerDied","Data":"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.380020 4647 generic.go:334] "Generic (PLEG): container finished" podID="5b22190a-1058-4b79-a0a8-ac511c568b3d" 
containerID="b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95" exitCode=0 Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.380629 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerDied","Data":"b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.417607 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0304eafc-44e6-4d82-96ca-e2e2a22724d4","Type":"ContainerStarted","Data":"01c2e39cad2b30b004b258b9b9e096de72a15eb4151ee787f1478a128508de98"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.418427 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0304eafc-44e6-4d82-96ca-e2e2a22724d4","Type":"ContainerStarted","Data":"19f67a9b8cbaf443815a6efbfe7393e275ffad7e07c90156ec4764d7252a9f5b"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.466210 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-glkws" event={"ID":"00e78d31-cbbb-4ee5-b687-14e01b2761df","Type":"ContainerStarted","Data":"64897827d38af110d277a0d43590b480a60cc7f4de8eea20346ac5603169e31d"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.475630 4647 generic.go:334] "Generic (PLEG): container finished" podID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerID="742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe" exitCode=0 Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.475736 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerDied","Data":"742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.475773 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerStarted","Data":"b0933ae08e61952a9edf48915e1223858858b7175535b9c137a93fcb03ea98b5"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.482689 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" event={"ID":"6ab07d56-17ed-4c33-a43f-181e5ab30502","Type":"ContainerStarted","Data":"1190b6b58b034861d5481989e37d34be8baec7616c8c95c85462a880b669753a"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.482750 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" event={"ID":"6ab07d56-17ed-4c33-a43f-181e5ab30502","Type":"ContainerStarted","Data":"dea3637b3132403e676ef88703c4214bf74702c6f014e973eadbc1b60b6232f2"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.483813 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.501376 4647 generic.go:334] "Generic (PLEG): container finished" podID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerID="c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c" exitCode=0 Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.501821 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerDied","Data":"c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.501861 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerStarted","Data":"e9c3d3584b6b9c98ba019cbd585f7006cddf8946defafecf5e85ba521a4cab8b"} Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.524655 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.524625681 podStartE2EDuration="2.524625681s" podCreationTimestamp="2025-11-28 15:27:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:07.448290972 +0000 UTC m=+157.295897403" watchObservedRunningTime="2025-11-28 15:27:07.524625681 +0000 UTC m=+157.372232102" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.530881 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-glkws" podStartSLOduration=18.530861678 podStartE2EDuration="18.530861678s" podCreationTimestamp="2025-11-28 15:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:07.487669856 +0000 UTC m=+157.335276277" watchObservedRunningTime="2025-11-28 15:27:07.530861678 +0000 UTC m=+157.378468099" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.554592 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" podStartSLOduration=135.554560188 podStartE2EDuration="2m15.554560188s" podCreationTimestamp="2025-11-28 15:24:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:27:07.513135566 +0000 UTC m=+157.360741987" watchObservedRunningTime="2025-11-28 15:27:07.554560188 +0000 UTC m=+157.402166609" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.902293 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.989558 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir\") pod \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.989703 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access\") pod \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\" (UID: \"ab07a51d-c81c-475b-8472-4bcadd4cdc03\") " Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.990025 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ab07a51d-c81c-475b-8472-4bcadd4cdc03" (UID: "ab07a51d-c81c-475b-8472-4bcadd4cdc03"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:27:07 crc kubenswrapper[4647]: I1128 15:27:07.990186 4647 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.018957 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ab07a51d-c81c-475b-8472-4bcadd4cdc03" (UID: "ab07a51d-c81c-475b-8472-4bcadd4cdc03"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.091486 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab07a51d-c81c-475b-8472-4bcadd4cdc03-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.122201 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.135574 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:08 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:08 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:08 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.135663 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-l52jr" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.135699 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.576778 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ab07a51d-c81c-475b-8472-4bcadd4cdc03","Type":"ContainerDied","Data":"52de4fbb7606741c3e4885cfdf2f7a4e7e7aa789006e2dd1914c2b5f468aa285"} Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.576832 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52de4fbb7606741c3e4885cfdf2f7a4e7e7aa789006e2dd1914c2b5f468aa285" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.576872 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.581372 4647 generic.go:334] "Generic (PLEG): container finished" podID="f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" containerID="ad85127e24b570fc14376b2b3dcccc013586fbd7d0a56b910c1a95adb7ea43d2" exitCode=0 Nov 28 15:27:08 crc kubenswrapper[4647]: I1128 15:27:08.583719 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" event={"ID":"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6","Type":"ContainerDied","Data":"ad85127e24b570fc14376b2b3dcccc013586fbd7d0a56b910c1a95adb7ea43d2"} Nov 28 15:27:09 crc kubenswrapper[4647]: I1128 15:27:09.130758 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:09 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:09 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:09 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:09 crc kubenswrapper[4647]: I1128 15:27:09.130946 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:09 crc kubenswrapper[4647]: I1128 15:27:09.652357 4647 generic.go:334] "Generic (PLEG): container finished" podID="0304eafc-44e6-4d82-96ca-e2e2a22724d4" containerID="01c2e39cad2b30b004b258b9b9e096de72a15eb4151ee787f1478a128508de98" exitCode=0 Nov 28 15:27:09 crc kubenswrapper[4647]: I1128 15:27:09.653015 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0304eafc-44e6-4d82-96ca-e2e2a22724d4","Type":"ContainerDied","Data":"01c2e39cad2b30b004b258b9b9e096de72a15eb4151ee787f1478a128508de98"} Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.126984 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:10 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:10 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:10 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.127576 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.241133 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.329029 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume\") pod \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.329112 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g56gz\" (UniqueName: \"kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz\") pod \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.329191 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume\") pod \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\" (UID: \"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6\") " Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.336027 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume" (OuterVolumeSpecName: "config-volume") pod "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" (UID: "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.336801 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" (UID: "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.342197 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz" (OuterVolumeSpecName: "kube-api-access-g56gz") pod "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" (UID: "f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6"). InnerVolumeSpecName "kube-api-access-g56gz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.431280 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.431318 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g56gz\" (UniqueName: \"kubernetes.io/projected/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-kube-api-access-g56gz\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.431334 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.672239 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.672497 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q" event={"ID":"f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6","Type":"ContainerDied","Data":"9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a"} Nov 28 15:27:10 crc kubenswrapper[4647]: I1128 15:27:10.672549 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e18102ea52b0bcb3258ee01b310e1283354303143dff520d447794dfe8f100a" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.127385 4647 patch_prober.go:28] interesting pod/router-default-5444994796-t88ct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:27:11 crc kubenswrapper[4647]: [-]has-synced failed: reason withheld Nov 28 15:27:11 crc kubenswrapper[4647]: [+]process-running ok Nov 28 15:27:11 crc kubenswrapper[4647]: healthz check failed Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.128139 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-t88ct" podUID="a2609685-750c-4716-8f93-a37049032177" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.229663 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-lkh97" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.253225 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.345510 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access\") pod \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.345631 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir\") pod \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\" (UID: \"0304eafc-44e6-4d82-96ca-e2e2a22724d4\") " Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.345752 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0304eafc-44e6-4d82-96ca-e2e2a22724d4" (UID: "0304eafc-44e6-4d82-96ca-e2e2a22724d4"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.346026 4647 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.352065 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0304eafc-44e6-4d82-96ca-e2e2a22724d4" (UID: "0304eafc-44e6-4d82-96ca-e2e2a22724d4"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.447487 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0304eafc-44e6-4d82-96ca-e2e2a22724d4-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.714645 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0304eafc-44e6-4d82-96ca-e2e2a22724d4","Type":"ContainerDied","Data":"19f67a9b8cbaf443815a6efbfe7393e275ffad7e07c90156ec4764d7252a9f5b"} Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.714709 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19f67a9b8cbaf443815a6efbfe7393e275ffad7e07c90156ec4764d7252a9f5b" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.714891 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.825547 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.826015 4647 patch_prober.go:28] interesting pod/downloads-7954f5f757-7z7l2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.826039 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 15:27:11 crc kubenswrapper[4647]: I1128 15:27:11.826070 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7z7l2" podUID="5e711623-78c2-4e1f-a65f-1c4d871e3d21" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.20:8080/\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 28 15:27:12 crc kubenswrapper[4647]: I1128 15:27:12.150694 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-t88ct" Nov 28 15:27:12 crc kubenswrapper[4647]: I1128 15:27:12.154519 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-t88ct" Nov 
28 15:27:13 crc kubenswrapper[4647]: I1128 15:27:13.035602 4647 patch_prober.go:28] interesting pod/console-f9d7485db-z99bl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Nov 28 15:27:13 crc kubenswrapper[4647]: I1128 15:27:13.035692 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-z99bl" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Nov 28 15:27:14 crc kubenswrapper[4647]: I1128 15:27:14.827003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:27:14 crc kubenswrapper[4647]: I1128 15:27:14.835184 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a79b0b39-cffb-4ac3-a526-837c6aa70616-metrics-certs\") pod \"network-metrics-daemon-cz6sq\" (UID: \"a79b0b39-cffb-4ac3-a526-837c6aa70616\") " pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:27:15 crc kubenswrapper[4647]: I1128 15:27:15.015946 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-cz6sq" Nov 28 15:27:15 crc kubenswrapper[4647]: I1128 15:27:15.605856 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-cz6sq"] Nov 28 15:27:15 crc kubenswrapper[4647]: W1128 15:27:15.644521 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda79b0b39_cffb_4ac3_a526_837c6aa70616.slice/crio-488ac88a6d71b03ae98f2d41252d61d22c169cb0ae9af46ee207ec9dcce1119c WatchSource:0}: Error finding container 488ac88a6d71b03ae98f2d41252d61d22c169cb0ae9af46ee207ec9dcce1119c: Status 404 returned error can't find the container with id 488ac88a6d71b03ae98f2d41252d61d22c169cb0ae9af46ee207ec9dcce1119c Nov 28 15:27:15 crc kubenswrapper[4647]: I1128 15:27:15.886449 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" event={"ID":"a79b0b39-cffb-4ac3-a526-837c6aa70616","Type":"ContainerStarted","Data":"488ac88a6d71b03ae98f2d41252d61d22c169cb0ae9af46ee207ec9dcce1119c"} Nov 28 15:27:17 crc kubenswrapper[4647]: I1128 15:27:17.023203 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:27:17 crc kubenswrapper[4647]: I1128 15:27:17.023712 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:27:17 crc kubenswrapper[4647]: I1128 15:27:17.932075 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/network-metrics-daemon-cz6sq" event={"ID":"a79b0b39-cffb-4ac3-a526-837c6aa70616","Type":"ContainerStarted","Data":"13d5cfab16ff8624c7a408bcada4882b2e8ffbba75998941d564cac388efdfc1"} Nov 28 15:27:21 crc kubenswrapper[4647]: I1128 15:27:21.830632 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7z7l2" Nov 28 15:27:23 crc kubenswrapper[4647]: I1128 15:27:23.039123 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:27:23 crc kubenswrapper[4647]: I1128 15:27:23.043547 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:27:25 crc kubenswrapper[4647]: I1128 15:27:25.464829 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:27:32 crc kubenswrapper[4647]: I1128 15:27:32.535582 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rmbtx" Nov 28 15:27:39 crc kubenswrapper[4647]: I1128 15:27:39.907784 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.598974 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:27:45 crc kubenswrapper[4647]: E1128 15:27:45.600237 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" containerName="collect-profiles" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600263 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" containerName="collect-profiles" Nov 28 15:27:45 crc kubenswrapper[4647]: E1128 15:27:45.600283 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab07a51d-c81c-475b-8472-4bcadd4cdc03" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600295 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab07a51d-c81c-475b-8472-4bcadd4cdc03" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: E1128 15:27:45.600343 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0304eafc-44e6-4d82-96ca-e2e2a22724d4" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600358 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0304eafc-44e6-4d82-96ca-e2e2a22724d4" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600575 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" containerName="collect-profiles" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600596 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0304eafc-44e6-4d82-96ca-e2e2a22724d4" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.600614 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab07a51d-c81c-475b-8472-4bcadd4cdc03" containerName="pruner" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.601302 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.605801 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.614338 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.635080 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.790748 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.790846 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.893275 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.893356 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:45 crc kubenswrapper[4647]: I1128 15:27:45.893649 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:46 crc kubenswrapper[4647]: I1128 15:27:46.485365 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:46 crc kubenswrapper[4647]: I1128 15:27:46.764885 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:27:47 crc kubenswrapper[4647]: I1128 15:27:47.022552 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:27:47 crc kubenswrapper[4647]: I1128 15:27:47.022644 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.809539 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.811970 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.820612 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.981831 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.981921 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:50 crc kubenswrapper[4647]: I1128 15:27:50.982135 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.090062 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.090664 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.090891 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.091038 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.091291 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.128855 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access\") pod \"installer-9-crc\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:51 crc kubenswrapper[4647]: I1128 15:27:51.153807 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:27:54 crc kubenswrapper[4647]: E1128 15:27:54.087584 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 15:27:54 crc kubenswrapper[4647]: E1128 15:27:54.088231 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b887v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-g4rzr_openshift-marketplace(10667e93-79c8-470d-a8a1-548e55a41f0e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context 
canceled" logger="UnhandledError" Nov 28 15:27:54 crc kubenswrapper[4647]: E1128 15:27:54.089621 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-g4rzr" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" Nov 28 15:27:59 crc kubenswrapper[4647]: E1128 15:27:59.515618 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-g4rzr" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" Nov 28 15:28:07 crc kubenswrapper[4647]: E1128 15:28:07.159567 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 15:28:07 crc kubenswrapper[4647]: E1128 15:28:07.160547 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-psmsp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-bfrkx_openshift-marketplace(019fb61c-0e19-4aa4-b042-970be7ad9119): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:07 crc kubenswrapper[4647]: E1128 15:28:07.162113 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-bfrkx" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" Nov 28 15:28:09 crc kubenswrapper[4647]: E1128 15:28:09.130108 4647 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-bfrkx" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.241073 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.241487 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c8m9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-cf7jh_openshift-marketplace(5b22190a-1058-4b79-a0a8-ac511c568b3d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.242873 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-cf7jh" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.708983 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.709213 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog 
--cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tckdm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gwwpv_openshift-marketplace(1dc48c84-53aa-4ee7-b192-0f7f4fcd0026): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:10 crc kubenswrapper[4647]: E1128 15:28:10.710911 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gwwpv" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.163076 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.163492 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kmwqh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-zlhvv_openshift-marketplace(343a4b6a-4616-462e-89e4-f3087ac0fd72): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.164931 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-zlhvv" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.404215 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.404463 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xwpgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-gbwhg_openshift-marketplace(4c861dff-9b7a-4c6c-b504-014c4afe4dc3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:11 crc kubenswrapper[4647]: E1128 15:28:11.405638 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-gbwhg" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.272125 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-zlhvv" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.272862 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-cf7jh" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.272878 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-gwwpv" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.421699 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.422256 4647 kuberuntime_manager.go:1274] "Unhandled Error" 
err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rqxl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-fxzbs_openshift-marketplace(c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.423223 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-gbwhg" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" Nov 28 15:28:12 crc kubenswrapper[4647]: E1128 15:28:12.423379 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-fxzbs" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" Nov 28 15:28:12 crc kubenswrapper[4647]: I1128 15:28:12.562612 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:28:12 crc kubenswrapper[4647]: I1128 15:28:12.638682 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:28:13 crc kubenswrapper[4647]: E1128 15:28:13.322127 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 15:28:13 crc kubenswrapper[4647]: E1128 15:28:13.322720 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nzsl6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-gmr25_openshift-marketplace(b87a797d-90cb-4417-b12d-ff15e5776e06): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:28:13 crc kubenswrapper[4647]: E1128 15:28:13.324167 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-gmr25" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" Nov 28 15:28:13 crc kubenswrapper[4647]: I1128 15:28:13.422803 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"60e93f81-f2e3-417a-a26f-14214daf2f20","Type":"ContainerStarted","Data":"e5e8c6ebcae556207fdcea832ea2af40427805844feda94655b3e6e71cef1057"} Nov 28 15:28:13 crc kubenswrapper[4647]: I1128 15:28:13.424245 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-cz6sq" event={"ID":"a79b0b39-cffb-4ac3-a526-837c6aa70616","Type":"ContainerStarted","Data":"61b26a40e5ac12ec23ab778057140a86f3e02d4f06efa9d7a9190b9aeccce516"} Nov 28 15:28:13 crc kubenswrapper[4647]: I1128 15:28:13.426778 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"25dcf47c-400f-4533-9338-0124305e046c","Type":"ContainerStarted","Data":"45d4278486d412e092d846990c6c519f7548dc7b7a471b7af6d160704a8360b1"} Nov 28 15:28:13 crc kubenswrapper[4647]: E1128 15:28:13.429366 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-gmr25" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" Nov 28 15:28:13 crc kubenswrapper[4647]: E1128 15:28:13.430840 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-fxzbs" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" Nov 28 15:28:13 crc kubenswrapper[4647]: I1128 15:28:13.519012 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-cz6sq" podStartSLOduration=202.518986564 podStartE2EDuration="3m22.518986564s" podCreationTimestamp="2025-11-28 15:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:28:13.481672439 +0000 UTC m=+223.329278900" watchObservedRunningTime="2025-11-28 15:28:13.518986564 +0000 UTC m=+223.366592985" Nov 28 15:28:14 crc kubenswrapper[4647]: I1128 15:28:14.432988 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"25dcf47c-400f-4533-9338-0124305e046c","Type":"ContainerStarted","Data":"67d73e31a7bb5b67c63d0ec71a27f1c4b9dbdc423e1c5f2b6681c646553cca94"} Nov 28 15:28:14 crc kubenswrapper[4647]: I1128 15:28:14.442743 4647 generic.go:334] "Generic (PLEG): container finished" podID="60e93f81-f2e3-417a-a26f-14214daf2f20" containerID="fafa3d5df9ded696df191dfda9c6a984c0842cf5382e35acc1f511dabe55c880" exitCode=0 Nov 28 15:28:14 crc kubenswrapper[4647]: I1128 15:28:14.442870 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"60e93f81-f2e3-417a-a26f-14214daf2f20","Type":"ContainerDied","Data":"fafa3d5df9ded696df191dfda9c6a984c0842cf5382e35acc1f511dabe55c880"} Nov 28 15:28:14 crc kubenswrapper[4647]: I1128 15:28:14.474889 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=24.474804441 podStartE2EDuration="24.474804441s" podCreationTimestamp="2025-11-28 15:27:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:28:14.457742398 +0000 UTC m=+224.305348819" watchObservedRunningTime="2025-11-28 15:28:14.474804441 +0000 UTC m=+224.322410862" Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.450761 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerStarted","Data":"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9"} Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.692302 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.786630 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir\") pod \"60e93f81-f2e3-417a-a26f-14214daf2f20\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.786773 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "60e93f81-f2e3-417a-a26f-14214daf2f20" (UID: "60e93f81-f2e3-417a-a26f-14214daf2f20"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.786821 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access\") pod \"60e93f81-f2e3-417a-a26f-14214daf2f20\" (UID: \"60e93f81-f2e3-417a-a26f-14214daf2f20\") " Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.787121 4647 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/60e93f81-f2e3-417a-a26f-14214daf2f20-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.792810 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "60e93f81-f2e3-417a-a26f-14214daf2f20" (UID: "60e93f81-f2e3-417a-a26f-14214daf2f20"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:15 crc kubenswrapper[4647]: I1128 15:28:15.888449 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/60e93f81-f2e3-417a-a26f-14214daf2f20-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:16 crc kubenswrapper[4647]: I1128 15:28:16.462597 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"60e93f81-f2e3-417a-a26f-14214daf2f20","Type":"ContainerDied","Data":"e5e8c6ebcae556207fdcea832ea2af40427805844feda94655b3e6e71cef1057"} Nov 28 15:28:16 crc kubenswrapper[4647]: I1128 15:28:16.462884 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:28:16 crc kubenswrapper[4647]: I1128 15:28:16.462901 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5e8c6ebcae556207fdcea832ea2af40427805844feda94655b3e6e71cef1057" Nov 28 15:28:16 crc kubenswrapper[4647]: I1128 15:28:16.466380 4647 generic.go:334] "Generic (PLEG): container finished" podID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerID="06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9" exitCode=0 Nov 28 15:28:16 crc kubenswrapper[4647]: I1128 15:28:16.466465 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerDied","Data":"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9"} Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.023098 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.023729 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.023828 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.024868 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.025075 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4" gracePeriod=600 Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.474288 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerStarted","Data":"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496"} Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.476168 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4" exitCode=0 Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.476212 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" 
event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4"} Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.476238 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16"} Nov 28 15:28:17 crc kubenswrapper[4647]: I1128 15:28:17.492074 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g4rzr" podStartSLOduration=3.702723846 podStartE2EDuration="1m15.492053249s" podCreationTimestamp="2025-11-28 15:27:02 +0000 UTC" firstStartedPulling="2025-11-28 15:27:05.083719536 +0000 UTC m=+154.931325957" lastFinishedPulling="2025-11-28 15:28:16.873048889 +0000 UTC m=+226.720655360" observedRunningTime="2025-11-28 15:28:17.489751364 +0000 UTC m=+227.337357795" watchObservedRunningTime="2025-11-28 15:28:17.492053249 +0000 UTC m=+227.339659670" Nov 28 15:28:22 crc kubenswrapper[4647]: I1128 15:28:22.728125 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:22 crc kubenswrapper[4647]: I1128 15:28:22.728736 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:22 crc kubenswrapper[4647]: I1128 15:28:22.805820 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:23 crc kubenswrapper[4647]: I1128 15:28:23.561677 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:25 crc kubenswrapper[4647]: I1128 15:28:25.634836 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"] Nov 28 15:28:25 crc kubenswrapper[4647]: I1128 15:28:25.635469 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g4rzr" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="registry-server" containerID="cri-o://15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496" gracePeriod=2 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.077431 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.145949 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities\") pod \"10667e93-79c8-470d-a8a1-548e55a41f0e\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.146115 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b887v\" (UniqueName: \"kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v\") pod \"10667e93-79c8-470d-a8a1-548e55a41f0e\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.146184 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content\") pod \"10667e93-79c8-470d-a8a1-548e55a41f0e\" (UID: \"10667e93-79c8-470d-a8a1-548e55a41f0e\") " Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.147765 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities" (OuterVolumeSpecName: "utilities") pod "10667e93-79c8-470d-a8a1-548e55a41f0e" (UID: "10667e93-79c8-470d-a8a1-548e55a41f0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.154735 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v" (OuterVolumeSpecName: "kube-api-access-b887v") pod "10667e93-79c8-470d-a8a1-548e55a41f0e" (UID: "10667e93-79c8-470d-a8a1-548e55a41f0e"). InnerVolumeSpecName "kube-api-access-b887v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.221558 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10667e93-79c8-470d-a8a1-548e55a41f0e" (UID: "10667e93-79c8-470d-a8a1-548e55a41f0e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.248376 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.248462 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10667e93-79c8-470d-a8a1-548e55a41f0e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.248481 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b887v\" (UniqueName: \"kubernetes.io/projected/10667e93-79c8-470d-a8a1-548e55a41f0e-kube-api-access-b887v\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.546040 4647 generic.go:334] "Generic (PLEG): container finished" podID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerID="15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496" exitCode=0 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.546116 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g4rzr" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.546149 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerDied","Data":"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.547564 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g4rzr" event={"ID":"10667e93-79c8-470d-a8a1-548e55a41f0e","Type":"ContainerDied","Data":"5fe894ed6a6b2c4040fb2a2e98064d20c86ed33b5000fb4f52e6416666968826"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.547620 4647 scope.go:117] "RemoveContainer" containerID="15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.550581 4647 generic.go:334] "Generic (PLEG): container finished" podID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerID="2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca" exitCode=0 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.550656 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerDied","Data":"2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.557640 4647 generic.go:334] "Generic (PLEG): container finished" podID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerID="fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97" exitCode=0 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.557709 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerDied","Data":"fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.567057 4647 generic.go:334] "Generic (PLEG): container finished" podID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerID="e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7" exitCode=0 
Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.567138 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerDied","Data":"e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.579539 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerStarted","Data":"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.592619 4647 generic.go:334] "Generic (PLEG): container finished" podID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerID="85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41" exitCode=0 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.592705 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerDied","Data":"85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.602208 4647 generic.go:334] "Generic (PLEG): container finished" podID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerID="338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f" exitCode=0 Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.602269 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerDied","Data":"338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f"} Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.608247 4647 scope.go:117] "RemoveContainer" containerID="06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.632045 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"] Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.643773 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g4rzr"] Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.656654 4647 scope.go:117] "RemoveContainer" containerID="370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.695572 4647 scope.go:117] "RemoveContainer" containerID="15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496" Nov 28 15:28:26 crc kubenswrapper[4647]: E1128 15:28:26.697003 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496\": container with ID starting with 15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496 not found: ID does not exist" containerID="15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.697134 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496"} err="failed to get container status \"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496\": rpc error: code = NotFound desc = could not find container 
\"15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496\": container with ID starting with 15c2321acfb8dd096441a525bcdca8af9a1e3b4aa6d5b345d95ff40a08b3f496 not found: ID does not exist" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.697310 4647 scope.go:117] "RemoveContainer" containerID="06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9" Nov 28 15:28:26 crc kubenswrapper[4647]: E1128 15:28:26.697768 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9\": container with ID starting with 06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9 not found: ID does not exist" containerID="06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.697800 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9"} err="failed to get container status \"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9\": rpc error: code = NotFound desc = could not find container \"06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9\": container with ID starting with 06b149d413c638249848010258ad3ff7ff4d4c667f0b1313da61063336044cb9 not found: ID does not exist" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.697817 4647 scope.go:117] "RemoveContainer" containerID="370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6" Nov 28 15:28:26 crc kubenswrapper[4647]: E1128 15:28:26.698163 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6\": container with ID starting with 370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6 not found: ID does not exist" containerID="370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6" Nov 28 15:28:26 crc kubenswrapper[4647]: I1128 15:28:26.698188 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6"} err="failed to get container status \"370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6\": rpc error: code = NotFound desc = could not find container \"370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6\": container with ID starting with 370f824fd2cb9303292261ce071501a04951b9d35c8123f2cecfa5f65ee544f6 not found: ID does not exist" Nov 28 15:28:27 crc kubenswrapper[4647]: I1128 15:28:27.610946 4647 generic.go:334] "Generic (PLEG): container finished" podID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerID="d25181b0d6b7960401e0a68079f6b2d4f4793794073b8be48908738b144a4c61" exitCode=0 Nov 28 15:28:27 crc kubenswrapper[4647]: I1128 15:28:27.611060 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerDied","Data":"d25181b0d6b7960401e0a68079f6b2d4f4793794073b8be48908738b144a4c61"} Nov 28 15:28:28 crc kubenswrapper[4647]: I1128 15:28:28.400832 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" path="/var/lib/kubelet/pods/10667e93-79c8-470d-a8a1-548e55a41f0e/volumes" Nov 28 15:28:28 crc kubenswrapper[4647]: I1128 
15:28:28.620290 4647 generic.go:334] "Generic (PLEG): container finished" podID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerID="ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45" exitCode=0
Nov 28 15:28:28 crc kubenswrapper[4647]: I1128 15:28:28.620343 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerDied","Data":"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"}
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.627102 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerStarted","Data":"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"}
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.630295 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerStarted","Data":"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd"}
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.640805 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerStarted","Data":"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e"}
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.654827 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerStarted","Data":"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f"}
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.662276 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cf7jh" podStartSLOduration=3.9723108849999997 podStartE2EDuration="1m25.662251722s" podCreationTimestamp="2025-11-28 15:27:04 +0000 UTC" firstStartedPulling="2025-11-28 15:27:07.387732109 +0000 UTC m=+157.235338530" lastFinishedPulling="2025-11-28 15:28:29.077672946 +0000 UTC m=+238.925279367" observedRunningTime="2025-11-28 15:28:29.662222421 +0000 UTC m=+239.509828842" watchObservedRunningTime="2025-11-28 15:28:29.662251722 +0000 UTC m=+239.509858153"
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.679217 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gwwpv" podStartSLOduration=4.908006274 podStartE2EDuration="1m28.679200522s" podCreationTimestamp="2025-11-28 15:27:01 +0000 UTC" firstStartedPulling="2025-11-28 15:27:05.114780215 +0000 UTC m=+154.962386636" lastFinishedPulling="2025-11-28 15:28:28.885974463 +0000 UTC m=+238.733580884" observedRunningTime="2025-11-28 15:28:29.67560428 +0000 UTC m=+239.523210701" watchObservedRunningTime="2025-11-28 15:28:29.679200522 +0000 UTC m=+239.526806943"
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.695759 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fxzbs" podStartSLOduration=5.065983252 podStartE2EDuration="1m28.69574573s" podCreationTimestamp="2025-11-28 15:27:01 +0000 UTC" firstStartedPulling="2025-11-28 15:27:05.08421948 +0000 UTC m=+154.931825901" lastFinishedPulling="2025-11-28 15:28:28.713981958 +0000 UTC m=+238.561588379" observedRunningTime="2025-11-28 15:28:29.692312483 +0000 UTC m=+239.539918904" watchObservedRunningTime="2025-11-28 15:28:29.69574573 +0000 UTC m=+239.543352151"
Nov 28 15:28:29 crc kubenswrapper[4647]: I1128 15:28:29.713188 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gbwhg" podStartSLOduration=4.992548848 podStartE2EDuration="1m26.713165302s" podCreationTimestamp="2025-11-28 15:27:03 +0000 UTC" firstStartedPulling="2025-11-28 15:27:07.346311588 +0000 UTC m=+157.193918009" lastFinishedPulling="2025-11-28 15:28:29.066928052 +0000 UTC m=+238.914534463" observedRunningTime="2025-11-28 15:28:29.711381802 +0000 UTC m=+239.558988223" watchObservedRunningTime="2025-11-28 15:28:29.713165302 +0000 UTC m=+239.560771723"
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.661733 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerStarted","Data":"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711"}
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.664230 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerStarted","Data":"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"}
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.666211 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerStarted","Data":"f17f70ac76e358b1b9586614869941278714062f68038eeb591e97b4ffd153fd"}
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.687477 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bfrkx" podStartSLOduration=4.450652055 podStartE2EDuration="1m26.687455861s" podCreationTimestamp="2025-11-28 15:27:04 +0000 UTC" firstStartedPulling="2025-11-28 15:27:07.489855448 +0000 UTC m=+157.337461869" lastFinishedPulling="2025-11-28 15:28:29.726659254 +0000 UTC m=+239.574265675" observedRunningTime="2025-11-28 15:28:30.683666744 +0000 UTC m=+240.531273175" watchObservedRunningTime="2025-11-28 15:28:30.687455861 +0000 UTC m=+240.535062292"
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.706578 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gmr25" podStartSLOduration=4.223126006 podStartE2EDuration="1m28.706559172s" podCreationTimestamp="2025-11-28 15:27:02 +0000 UTC" firstStartedPulling="2025-11-28 15:27:05.084294262 +0000 UTC m=+154.931900683" lastFinishedPulling="2025-11-28 15:28:29.567727418 +0000 UTC m=+239.415333849" observedRunningTime="2025-11-28 15:28:30.704401311 +0000 UTC m=+240.552007732" watchObservedRunningTime="2025-11-28 15:28:30.706559172 +0000 UTC m=+240.554165593"
Nov 28 15:28:30 crc kubenswrapper[4647]: I1128 15:28:30.732435 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zlhvv" podStartSLOduration=3.6197896800000002 podStartE2EDuration="1m25.732398093s" podCreationTimestamp="2025-11-28 15:27:05 +0000 UTC" firstStartedPulling="2025-11-28 15:27:07.527027029 +0000 UTC m=+157.374633450" lastFinishedPulling="2025-11-28 15:28:29.639635452 +0000 UTC m=+239.487241863" observedRunningTime="2025-11-28 15:28:30.72840264 +0000 UTC m=+240.576009061" watchObservedRunningTime="2025-11-28 15:28:30.732398093 +0000 UTC m=+240.580004514"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.184802 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.185166 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.231453 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.360335 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.360821 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.416658 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.560636 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:32 crc kubenswrapper[4647]: I1128 15:28:32.560706 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:33 crc kubenswrapper[4647]: I1128 15:28:33.607318 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-gmr25" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:28:33 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:28:33 crc kubenswrapper[4647]: >
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.319665 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.319724 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.359950 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.679952 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.680023 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:34 crc kubenswrapper[4647]: I1128 15:28:34.737740 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:28:35 crc kubenswrapper[4647]: I1128 15:28:35.348810 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:28:35 crc kubenswrapper[4647]: I1128 15:28:35.348891 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:28:35 crc kubenswrapper[4647]: I1128 15:28:35.714049 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-cf7jh" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:28:35 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:28:35 crc kubenswrapper[4647]: >
Nov 28 15:28:35 crc kubenswrapper[4647]: I1128 15:28:35.824738 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:35 crc kubenswrapper[4647]: I1128 15:28:35.825111 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:36 crc kubenswrapper[4647]: I1128 15:28:36.384776 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bfrkx" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:28:36 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:28:36 crc kubenswrapper[4647]: >
Nov 28 15:28:36 crc kubenswrapper[4647]: I1128 15:28:36.878096 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zlhvv" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:28:36 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:28:36 crc kubenswrapper[4647]: >
Nov 28 15:28:42 crc kubenswrapper[4647]: I1128 15:28:42.227359 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fxzbs"
Nov 28 15:28:42 crc kubenswrapper[4647]: I1128 15:28:42.400887 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gwwpv"
Nov 28 15:28:42 crc kubenswrapper[4647]: I1128 15:28:42.611483 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:42 crc kubenswrapper[4647]: I1128 15:28:42.646882 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:43 crc kubenswrapper[4647]: I1128 15:28:43.453000 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:28:43 crc kubenswrapper[4647]: I1128 15:28:43.743863 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gmr25" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="registry-server" containerID="cri-o://f17f70ac76e358b1b9586614869941278714062f68038eeb591e97b4ffd153fd" gracePeriod=2
Nov 28 15:28:44 crc kubenswrapper[4647]: I1128 15:28:44.752863 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:44 crc kubenswrapper[4647]: I1128 15:28:44.815197 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:45 crc kubenswrapper[4647]: I1128 15:28:45.415592 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:28:45 crc kubenswrapper[4647]: I1128 15:28:45.458433 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bfrkx"
Nov 28 15:28:45 crc kubenswrapper[4647]: I1128 15:28:45.890231 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:45 crc kubenswrapper[4647]: I1128 15:28:45.939401 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.056473 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"]
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.056814 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cf7jh" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="registry-server" containerID="cri-o://bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952" gracePeriod=2
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.439328 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.473051 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8m9m\" (UniqueName: \"kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m\") pod \"5b22190a-1058-4b79-a0a8-ac511c568b3d\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.473108 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content\") pod \"5b22190a-1058-4b79-a0a8-ac511c568b3d\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.473155 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities\") pod \"5b22190a-1058-4b79-a0a8-ac511c568b3d\" (UID: \"5b22190a-1058-4b79-a0a8-ac511c568b3d\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.474721 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities" (OuterVolumeSpecName: "utilities") pod "5b22190a-1058-4b79-a0a8-ac511c568b3d" (UID: "5b22190a-1058-4b79-a0a8-ac511c568b3d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.480597 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m" (OuterVolumeSpecName: "kube-api-access-c8m9m") pod "5b22190a-1058-4b79-a0a8-ac511c568b3d" (UID: "5b22190a-1058-4b79-a0a8-ac511c568b3d"). InnerVolumeSpecName "kube-api-access-c8m9m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.503823 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b22190a-1058-4b79-a0a8-ac511c568b3d" (UID: "5b22190a-1058-4b79-a0a8-ac511c568b3d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.574471 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8m9m\" (UniqueName: \"kubernetes.io/projected/5b22190a-1058-4b79-a0a8-ac511c568b3d-kube-api-access-c8m9m\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.574498 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.574508 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b22190a-1058-4b79-a0a8-ac511c568b3d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.776678 4647 generic.go:334] "Generic (PLEG): container finished" podID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerID="f17f70ac76e358b1b9586614869941278714062f68038eeb591e97b4ffd153fd" exitCode=0
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.776755 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerDied","Data":"f17f70ac76e358b1b9586614869941278714062f68038eeb591e97b4ffd153fd"}
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.788773 4647 generic.go:334] "Generic (PLEG): container finished" podID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerID="bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952" exitCode=0
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.788838 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerDied","Data":"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"}
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.788885 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cf7jh" event={"ID":"5b22190a-1058-4b79-a0a8-ac511c568b3d","Type":"ContainerDied","Data":"6e144a375f84503a3acaffb7e39751b7be972842682bec900a90577f2f5a3cb0"}
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.788910 4647 scope.go:117] "RemoveContainer" containerID="bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.789107 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cf7jh"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.822785 4647 scope.go:117] "RemoveContainer" containerID="338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.822812 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"]
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.827773 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cf7jh"]
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.860456 4647 scope.go:117] "RemoveContainer" containerID="b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.863388 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.881844 4647 scope.go:117] "RemoveContainer" containerID="bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"
Nov 28 15:28:47 crc kubenswrapper[4647]: E1128 15:28:47.882314 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952\": container with ID starting with bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952 not found: ID does not exist" containerID="bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.882349 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952"} err="failed to get container status \"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952\": rpc error: code = NotFound desc = could not find container \"bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952\": container with ID starting with bf36c449b421180167987a12c60f6e9a35761da100bae8f95cb81bbbb0fda952 not found: ID does not exist"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.882379 4647 scope.go:117] "RemoveContainer" containerID="338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f"
Nov 28 15:28:47 crc kubenswrapper[4647]: E1128 15:28:47.885846 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f\": container with ID starting with 338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f not found: ID does not exist" containerID="338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.885893 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f"} err="failed to get container status \"338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f\": rpc error: code = NotFound desc = could not find container \"338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f\": container with ID starting with 338d65c4908f26dd297c69e833fe04f7e7e4cf6296a65ce7bd88197bf6d7796f not found: ID does not exist"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.885925 4647 scope.go:117] "RemoveContainer" containerID="b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95"
Nov 28 15:28:47 crc kubenswrapper[4647]: E1128 15:28:47.887696 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95\": container with ID starting with b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95 not found: ID does not exist" containerID="b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.887719 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95"} err="failed to get container status \"b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95\": rpc error: code = NotFound desc = could not find container \"b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95\": container with ID starting with b8b37ec5749e23120849da9244664dd3f0746f4cdb3b7d1288888a79459a9a95 not found: ID does not exist"
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.979378 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzsl6\" (UniqueName: \"kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6\") pod \"b87a797d-90cb-4417-b12d-ff15e5776e06\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.979557 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content\") pod \"b87a797d-90cb-4417-b12d-ff15e5776e06\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.979719 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities\") pod \"b87a797d-90cb-4417-b12d-ff15e5776e06\" (UID: \"b87a797d-90cb-4417-b12d-ff15e5776e06\") "
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.980508 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities" (OuterVolumeSpecName: "utilities") pod "b87a797d-90cb-4417-b12d-ff15e5776e06" (UID: "b87a797d-90cb-4417-b12d-ff15e5776e06"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:47 crc kubenswrapper[4647]: I1128 15:28:47.987506 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6" (OuterVolumeSpecName: "kube-api-access-nzsl6") pod "b87a797d-90cb-4417-b12d-ff15e5776e06" (UID: "b87a797d-90cb-4417-b12d-ff15e5776e06"). InnerVolumeSpecName "kube-api-access-nzsl6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.032069 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b87a797d-90cb-4417-b12d-ff15e5776e06" (UID: "b87a797d-90cb-4417-b12d-ff15e5776e06"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.081568 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzsl6\" (UniqueName: \"kubernetes.io/projected/b87a797d-90cb-4417-b12d-ff15e5776e06-kube-api-access-nzsl6\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.081610 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.081619 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b87a797d-90cb-4417-b12d-ff15e5776e06-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.401652 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" path="/var/lib/kubelet/pods/5b22190a-1058-4b79-a0a8-ac511c568b3d/volumes"
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.559080 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-n7kp2"]
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.796444 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmr25" event={"ID":"b87a797d-90cb-4417-b12d-ff15e5776e06","Type":"ContainerDied","Data":"023f2905ece168b649dbfb55707c4f07f172a36cb772c91a0ce319f22d1511de"}
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.796845 4647 scope.go:117] "RemoveContainer" containerID="f17f70ac76e358b1b9586614869941278714062f68038eeb591e97b4ffd153fd"
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.796492 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmr25"
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.813850 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.814988 4647 scope.go:117] "RemoveContainer" containerID="d25181b0d6b7960401e0a68079f6b2d4f4793794073b8be48908738b144a4c61"
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.824302 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gmr25"]
Nov 28 15:28:48 crc kubenswrapper[4647]: I1128 15:28:48.842567 4647 scope.go:117] "RemoveContainer" containerID="ddc6eda08d8d35995477efb2b6ebb867fb4c7c007d16edfc9e84cab43fad2df6"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.255641 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"]
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.255863 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zlhvv" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="registry-server" containerID="cri-o://23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34" gracePeriod=2
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.729431 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.804139 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities\") pod \"343a4b6a-4616-462e-89e4-f3087ac0fd72\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") "
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.804296 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content\") pod \"343a4b6a-4616-462e-89e4-f3087ac0fd72\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") "
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.804325 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmwqh\" (UniqueName: \"kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh\") pod \"343a4b6a-4616-462e-89e4-f3087ac0fd72\" (UID: \"343a4b6a-4616-462e-89e4-f3087ac0fd72\") "
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.805860 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities" (OuterVolumeSpecName: "utilities") pod "343a4b6a-4616-462e-89e4-f3087ac0fd72" (UID: "343a4b6a-4616-462e-89e4-f3087ac0fd72"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.823057 4647 generic.go:334] "Generic (PLEG): container finished" podID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerID="23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34" exitCode=0
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.823099 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerDied","Data":"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"}
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.823122 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zlhvv" event={"ID":"343a4b6a-4616-462e-89e4-f3087ac0fd72","Type":"ContainerDied","Data":"e9c3d3584b6b9c98ba019cbd585f7006cddf8946defafecf5e85ba521a4cab8b"}
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.823139 4647 scope.go:117] "RemoveContainer" containerID="23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.823227 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zlhvv"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.837012 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh" (OuterVolumeSpecName: "kube-api-access-kmwqh") pod "343a4b6a-4616-462e-89e4-f3087ac0fd72" (UID: "343a4b6a-4616-462e-89e4-f3087ac0fd72"). InnerVolumeSpecName "kube-api-access-kmwqh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.858609 4647 scope.go:117] "RemoveContainer" containerID="ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.908082 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmwqh\" (UniqueName: \"kubernetes.io/projected/343a4b6a-4616-462e-89e4-f3087ac0fd72-kube-api-access-kmwqh\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.908137 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.922403 4647 scope.go:117] "RemoveContainer" containerID="c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.949953 4647 scope.go:117] "RemoveContainer" containerID="23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"
Nov 28 15:28:49 crc kubenswrapper[4647]: E1128 15:28:49.951041 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34\": container with ID starting with 23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34 not found: ID does not exist" containerID="23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.951105 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34"} err="failed to get container status \"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34\": rpc error: code = NotFound desc = could not find container \"23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34\": container with ID starting with 23ef876b92876570aa154141980ebdb9afde772307262995c9ff66974bd4fc34 not found: ID does not exist"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.951145 4647 scope.go:117] "RemoveContainer" containerID="ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"
Nov 28 15:28:49 crc kubenswrapper[4647]: E1128 15:28:49.951503 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45\": container with ID starting with ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45 not found: ID does not exist" containerID="ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.951524 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45"} err="failed to get container status \"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45\": rpc error: code = NotFound desc = could not find container \"ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45\": container with ID starting with ac767c814ca2cceed39c20785e53fa2a08456b24cb4d213b38ee5473b909ec45 not found: ID does not exist"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.951538 4647 scope.go:117] "RemoveContainer" containerID="c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c"
Nov 28 15:28:49 crc kubenswrapper[4647]: E1128 15:28:49.951754 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c\": container with ID starting with c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c not found: ID does not exist" containerID="c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c"
Nov 28 15:28:49 crc kubenswrapper[4647]: I1128 15:28:49.951783 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c"} err="failed to get container status \"c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c\": rpc error: code = NotFound desc = could not find container \"c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c\": container with ID starting with c26e20a8f085fe4053c52f9f018d3d383943abd115628b5a6df78bb85bdbe80c not found: ID does not exist"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.026180 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "343a4b6a-4616-462e-89e4-f3087ac0fd72" (UID: "343a4b6a-4616-462e-89e4-f3087ac0fd72"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.111035 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/343a4b6a-4616-462e-89e4-f3087ac0fd72-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.148021 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.155704 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zlhvv"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.403499 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" path="/var/lib/kubelet/pods/343a4b6a-4616-462e-89e4-f3087ac0fd72/volumes"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.404539 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" path="/var/lib/kubelet/pods/b87a797d-90cb-4417-b12d-ff15e5776e06/volumes"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.935273 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gwwpv"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.935591 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gwwpv" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="registry-server" containerID="cri-o://e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd" gracePeriod=30
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.943162 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fxzbs"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.945174 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fxzbs" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="registry-server" containerID="cri-o://2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e" gracePeriod=30
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.953082 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5rsn5"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.953312 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" containerID="cri-o://663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c" gracePeriod=30
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.969407 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbwhg"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.970070 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gbwhg" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="registry-server" containerID="cri-o://b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f" gracePeriod=30
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.976997 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bfrkx"]
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.977304 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bfrkx" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="registry-server" containerID="cri-o://f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711" gracePeriod=30
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982063 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql2cs"]
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982344 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60e93f81-f2e3-417a-a26f-14214daf2f20" containerName="pruner"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982364 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="60e93f81-f2e3-417a-a26f-14214daf2f20" containerName="pruner"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982382 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982388 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982399 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982420 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982433 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982439 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982450 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982457 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982465 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982472 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982482 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982488 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982495 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982501 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="extract-utilities"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982511 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982518 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982526 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982532 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982539 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982544 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="extract-content"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982553 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982559 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: E1128 15:28:50.982565 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982571 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982677 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="60e93f81-f2e3-417a-a26f-14214daf2f20" containerName="pruner"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982697 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b87a797d-90cb-4417-b12d-ff15e5776e06" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982706 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b22190a-1058-4b79-a0a8-ac511c568b3d" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982716 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="10667e93-79c8-470d-a8a1-548e55a41f0e" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.982723 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="343a4b6a-4616-462e-89e4-f3087ac0fd72" containerName="registry-server"
Nov 28 15:28:50 crc kubenswrapper[4647]: I1128 15:28:50.985463 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.020379 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.020475 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k94ht\" (UniqueName: \"kubernetes.io/projected/5b489703-e3e6-4ef2-b993-766bd6e12094-kube-api-access-k94ht\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.020575 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.030316 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql2cs"]
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.122319 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.122396 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.122449 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k94ht\" (UniqueName: \"kubernetes.io/projected/5b489703-e3e6-4ef2-b993-766bd6e12094-kube-api-access-k94ht\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.126282 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.141953 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5b489703-e3e6-4ef2-b993-766bd6e12094-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.144845 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k94ht\" (UniqueName: \"kubernetes.io/projected/5b489703-e3e6-4ef2-b993-766bd6e12094-kube-api-access-k94ht\") pod \"marketplace-operator-79b997595-ql2cs\" (UID: \"5b489703-e3e6-4ef2-b993-766bd6e12094\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.310937 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.335184 4647 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.336832 4647 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.336861 4647 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.336978 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.336996 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.337006 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337013 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.337020 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337026 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.337034 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337040 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.337046 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337052 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.337058 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337064 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337119 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337192 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337206 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337215 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337222 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337231 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.337731 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574" gracePeriod=15
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.338624 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24" gracePeriod=15
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.338690 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421" gracePeriod=15
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.338732 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e" gracePeriod=15
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.338785 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52" gracePeriod=15
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.346975 4647 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431315 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431401 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431440 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431465 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431485 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431500 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431523 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.431539 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.450239 4647 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.174:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.458266 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbwhg"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.458874 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.533775 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content\") pod \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") "
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.533826 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities\") pod \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") "
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.533951 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwpgj\" (UniqueName: \"kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj\") pod \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\" (UID: \"4c861dff-9b7a-4c6c-b504-014c4afe4dc3\") "
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534124 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534179 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534200 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534226 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534247 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:51 crc kubenswrapper[4647]:
I1128 15:28:51.534266 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534288 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534302 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.534390 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.537716 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.540451 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities" (OuterVolumeSpecName: "utilities") pod "4c861dff-9b7a-4c6c-b504-014c4afe4dc3" (UID: "4c861dff-9b7a-4c6c-b504-014c4afe4dc3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.540740 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.540961 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.540992 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.541033 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.541057 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.541083 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.565530 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj" (OuterVolumeSpecName: "kube-api-access-xwpgj") pod "4c861dff-9b7a-4c6c-b504-014c4afe4dc3" (UID: "4c861dff-9b7a-4c6c-b504-014c4afe4dc3"). InnerVolumeSpecName "kube-api-access-xwpgj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.569309 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:28:51Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:28:51Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:28:51Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:28:51Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:20434c856c20158a4c73986bf7de93188afa338ed356d293a59f9e621072cfc3\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:24f7dab5f4a6fcbb16d41b8a7345f9f9bae2ef1e2c53abed71c4f18eeafebc85\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1605131077},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:1ab7704f67839bb3705d0c80bea6f7197f233d472860c3005433c90d7786dd54\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:9c13035c7ccf9d13a21c9219d8d0d462fa2fdb4fe128d9724443784b1ed9a318\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1205801806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:485eae41e5a1129e031da03a9bc899702d16da22589d58a8e0c2910bc0226a23\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:86681c5c7f102911ba70f243ae7524f9a76939abbb50c93b1c80b70e07ccba62\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1195438934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:42da3a01b99987f17824a70b0ac9cde8d27a0ea39d325b9b7216ebdc5ba1f406\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:8932ced4defd2733d4740ea31dd7a6050447207c72233491a6ffdb06926137e7\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1122761533},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b2
4eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.174:6443: connect: 
connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.571048 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.571248 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.571471 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.571661 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.571692 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.572976 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c861dff-9b7a-4c6c-b504-014c4afe4dc3" (UID: "4c861dff-9b7a-4c6c-b504-014c4afe4dc3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.630709 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.631303 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.636106 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.636764 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.636789 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.636798 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwpgj\" (UniqueName: \"kubernetes.io/projected/4c861dff-9b7a-4c6c-b504-014c4afe4dc3-kube-api-access-xwpgj\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.646990 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.647491 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.647762 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.647907 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.668697 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gwwpv" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.669203 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.669387 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.669683 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.669982 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.682286 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fxzbs" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.683739 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.684007 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.684207 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.684362 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.685321 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737569 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tckdm\" (UniqueName: \"kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm\") pod \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737625 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities\") pod \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737687 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca\") pod \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737726 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content\") pod \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\" (UID: \"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026\") " Nov 28 15:28:51 crc 
kubenswrapper[4647]: I1128 15:28:51.737760 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content\") pod \"019fb61c-0e19-4aa4-b042-970be7ad9119\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737790 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics\") pod \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737831 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities\") pod \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737854 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psmsp\" (UniqueName: \"kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp\") pod \"019fb61c-0e19-4aa4-b042-970be7ad9119\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737932 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content\") pod \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737962 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqxl4\" (UniqueName: \"kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4\") pod \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\" (UID: \"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.737990 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gn7f\" (UniqueName: \"kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f\") pod \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\" (UID: \"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.738030 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities\") pod \"019fb61c-0e19-4aa4-b042-970be7ad9119\" (UID: \"019fb61c-0e19-4aa4-b042-970be7ad9119\") " Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.738867 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" (UID: "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.739796 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities" (OuterVolumeSpecName: "utilities") pod "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" (UID: "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.740054 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities" (OuterVolumeSpecName: "utilities") pod "019fb61c-0e19-4aa4-b042-970be7ad9119" (UID: "019fb61c-0e19-4aa4-b042-970be7ad9119"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.740365 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities" (OuterVolumeSpecName: "utilities") pod "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" (UID: "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.750813 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4" (OuterVolumeSpecName: "kube-api-access-rqxl4") pod "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" (UID: "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb"). InnerVolumeSpecName "kube-api-access-rqxl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.750896 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp" (OuterVolumeSpecName: "kube-api-access-psmsp") pod "019fb61c-0e19-4aa4-b042-970be7ad9119" (UID: "019fb61c-0e19-4aa4-b042-970be7ad9119"). InnerVolumeSpecName "kube-api-access-psmsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.750983 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.751006 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm" (OuterVolumeSpecName: "kube-api-access-tckdm") pod "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" (UID: "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026"). InnerVolumeSpecName "kube-api-access-tckdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.751467 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" (UID: "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.761270 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f" (OuterVolumeSpecName: "kube-api-access-4gn7f") pod "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" (UID: "4dc7c86d-c2a4-4903-bafc-63f0184dc2c1"). InnerVolumeSpecName "kube-api-access-4gn7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: E1128 15:28:51.780053 4647 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.174:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c3546b2646ac6 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC m=+261.626906459,LastTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC m=+261.626906459,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.792398 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" (UID: "1dc48c84-53aa-4ee7-b192-0f7f4fcd0026"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.824875 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" (UID: "c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839690 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839733 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psmsp\" (UniqueName: \"kubernetes.io/projected/019fb61c-0e19-4aa4-b042-970be7ad9119-kube-api-access-psmsp\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839748 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839761 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqxl4\" (UniqueName: \"kubernetes.io/projected/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb-kube-api-access-rqxl4\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839774 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gn7f\" (UniqueName: \"kubernetes.io/projected/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-kube-api-access-4gn7f\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839787 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839800 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tckdm\" (UniqueName: \"kubernetes.io/projected/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-kube-api-access-tckdm\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839817 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839829 4647 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839841 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.839850 4647 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.843405 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.844739 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24" 
exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.844773 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.844784 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.844791 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52" exitCode=2 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.850310 4647 generic.go:334] "Generic (PLEG): container finished" podID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerID="b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.850375 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerDied","Data":"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.850445 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbwhg" event={"ID":"4c861dff-9b7a-4c6c-b504-014c4afe4dc3","Type":"ContainerDied","Data":"2a3d68f407aabc106760d9865d5805064fd13dde79aecee69667f7fc702e23f4"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.850468 4647 scope.go:117] "RemoveContainer" containerID="b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.850613 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbwhg" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.851464 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.851680 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.852332 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.852750 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.853020 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.853145 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ab2a490a055ab3ecb9514ee98c3fba232cb9ee550f51e3ad4a328a5baeeb9fa9"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.857024 4647 generic.go:334] "Generic (PLEG): container finished" podID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerID="663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.857082 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" event={"ID":"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1","Type":"ContainerDied","Data":"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.857099 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" event={"ID":"4dc7c86d-c2a4-4903-bafc-63f0184dc2c1","Type":"ContainerDied","Data":"71e75fdbded947adfd7452c046b8975de12b65d821ab26a82635ddcef45b42b2"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.857164 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.861095 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.861257 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.861403 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.861747 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.862701 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.865436 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "019fb61c-0e19-4aa4-b042-970be7ad9119" (UID: "019fb61c-0e19-4aa4-b042-970be7ad9119"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.869949 4647 generic.go:334] "Generic (PLEG): container finished" podID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerID="e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.870015 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerDied","Data":"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.870048 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gwwpv" event={"ID":"1dc48c84-53aa-4ee7-b192-0f7f4fcd0026","Type":"ContainerDied","Data":"c436ae75f7a4302d75440096ac04355332f098fe17689544daf49bc871c8068d"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.870121 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gwwpv" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.870879 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.871219 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.871456 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.872999 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.874032 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.877690 4647 generic.go:334] "Generic (PLEG): container finished" podID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerID="2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.877844 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerDied","Data":"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.877887 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxzbs" event={"ID":"c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb","Type":"ContainerDied","Data":"ed2b5a430dc0494ae1acddec32a8069aadce04f7590aefcb4a2f62ab4f02b760"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.878181 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxzbs" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.879483 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.885402 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.887431 4647 generic.go:334] "Generic (PLEG): container finished" podID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerID="f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711" exitCode=0 Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.887463 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerDied","Data":"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.887483 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bfrkx" event={"ID":"019fb61c-0e19-4aa4-b042-970be7ad9119","Type":"ContainerDied","Data":"b0933ae08e61952a9edf48915e1223858858b7175535b9c137a93fcb03ea98b5"} Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.887529 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bfrkx" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.890835 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.892851 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.895246 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.896643 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.897316 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.898398 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.900657 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.901619 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.923158 4647 scope.go:117] "RemoveContainer" containerID="85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.944838 4647 
reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/019fb61c-0e19-4aa4-b042-970be7ad9119-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.975174 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.975665 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.975855 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.976086 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.976284 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.976593 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.976780 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.977078 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.977331 4647 status_manager.go:851] "Failed to get status for pod" 
podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.977586 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4647]: I1128 15:28:51.996262 4647 scope.go:117] "RemoveContainer" containerID="2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.015023 4647 scope.go:117] "RemoveContainer" containerID="b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.028756 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f\": container with ID starting with b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f not found: ID does not exist" containerID="b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.028818 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f"} err="failed to get container status \"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f\": rpc error: code = NotFound desc = could not find container \"b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f\": container with ID starting with b83b4772f396f6dff114b2a508940706785a52d457c4a15ef1236d5b9960068f not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.028858 4647 scope.go:117] "RemoveContainer" containerID="85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.029520 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41\": container with ID starting with 85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41 not found: ID does not exist" containerID="85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.029548 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41"} err="failed to get container status \"85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41\": rpc error: code = NotFound desc = could not find container \"85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41\": container with ID starting with 85f247a9dd30fb2167dbe4bad454cad2e8694331e0a7efafed4aa5130abdbb41 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.029567 4647 scope.go:117] "RemoveContainer" containerID="2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 
15:28:52.029846 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d\": container with ID starting with 2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d not found: ID does not exist" containerID="2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.029870 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d"} err="failed to get container status \"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d\": rpc error: code = NotFound desc = could not find container \"2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d\": container with ID starting with 2336a6916642edff1b273bfc686b7e27473962107edfe18c9d39809d8adeb51d not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.029888 4647 scope.go:117] "RemoveContainer" containerID="663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.048886 4647 scope.go:117] "RemoveContainer" containerID="663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.049383 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c\": container with ID starting with 663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c not found: ID does not exist" containerID="663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.049911 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c"} err="failed to get container status \"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c\": rpc error: code = NotFound desc = could not find container \"663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c\": container with ID starting with 663ee4353221b47fae2557c0517c7b935e2f29d8ec62a0683d116f351282940c not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.049951 4647 scope.go:117] "RemoveContainer" containerID="e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.074116 4647 scope.go:117] "RemoveContainer" containerID="2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.107449 4647 scope.go:117] "RemoveContainer" containerID="13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.124033 4647 scope.go:117] "RemoveContainer" containerID="e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.125038 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd\": container with ID starting with e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd not found: ID does not exist" 
containerID="e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.125070 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd"} err="failed to get container status \"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd\": rpc error: code = NotFound desc = could not find container \"e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd\": container with ID starting with e26ac9329e4c0c74d5c2c30b1f999d47a6ca95ea64cc746fa7975cf5b09f26fd not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.125096 4647 scope.go:117] "RemoveContainer" containerID="2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.125944 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca\": container with ID starting with 2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca not found: ID does not exist" containerID="2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.126002 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca"} err="failed to get container status \"2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca\": rpc error: code = NotFound desc = could not find container \"2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca\": container with ID starting with 2ca009200f69def410b5c4a6ed6c64e69ca52a3c0b3eb6323f8e6a7435c6e7ca not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.126045 4647 scope.go:117] "RemoveContainer" containerID="13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.126537 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059\": container with ID starting with 13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059 not found: ID does not exist" containerID="13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.126565 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059"} err="failed to get container status \"13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059\": rpc error: code = NotFound desc = could not find container \"13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059\": container with ID starting with 13b59027244d39478a60e31147a86d99f50ebec468a9cbaee87729617caaf059 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.126583 4647 scope.go:117] "RemoveContainer" containerID="2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.145043 4647 scope.go:117] "RemoveContainer" containerID="fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 
15:28:52.150156 4647 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 15:28:52 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207" Netns:"/var/run/netns/a2f14fde-3ae3-41eb-8c60-a310df8a8528" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:52 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:52 crc kubenswrapper[4647]: > Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.150216 4647 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 15:28:52 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207" Netns:"/var/run/netns/a2f14fde-3ae3-41eb-8c60-a310df8a8528" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod 
marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:52 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:52 crc kubenswrapper[4647]: > pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.150237 4647 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 15:28:52 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207" Netns:"/var/run/netns/a2f14fde-3ae3-41eb-8c60-a310df8a8528" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:52 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:52 crc kubenswrapper[4647]: > pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.150327 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\" with CreatePodSandboxError: \"Failed to create sandbox for pod 
\\\"marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207\\\" Netns:\\\"/var/run/netns/a2f14fde-3ae3-41eb-8c60-a310df8a8528\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=8ed7b41e6615c29500b6c79ddab1bb8663729e5fc3bb76208c5495c0d9197207;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s\\\": dial tcp 38.102.83.174:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.170558 4647 scope.go:117] "RemoveContainer" containerID="8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.184143 4647 scope.go:117] "RemoveContainer" containerID="2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.184598 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e\": container with ID starting with 2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e not found: ID does not exist" containerID="2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.184631 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e"} err="failed to get container status \"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e\": rpc error: code = NotFound desc = could not find 
container \"2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e\": container with ID starting with 2cd8a86173a1b322729e7d587faea1f4cfe36213cb65e1a562ee2feccc0fcb5e not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.184654 4647 scope.go:117] "RemoveContainer" containerID="fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.185138 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97\": container with ID starting with fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97 not found: ID does not exist" containerID="fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.185162 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97"} err="failed to get container status \"fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97\": rpc error: code = NotFound desc = could not find container \"fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97\": container with ID starting with fcaed226448a0a6ed70aa5f9b44cb39871e129821cbd467f4baa2203b9068e97 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.185178 4647 scope.go:117] "RemoveContainer" containerID="8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.185404 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1\": container with ID starting with 8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1 not found: ID does not exist" containerID="8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.185436 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1"} err="failed to get container status \"8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1\": rpc error: code = NotFound desc = could not find container \"8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1\": container with ID starting with 8e33a4b442cf758dd8c2fbbac51196b271d597a023cc31646ecb3f13e4b970c1 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.185451 4647 scope.go:117] "RemoveContainer" containerID="f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.199776 4647 scope.go:117] "RemoveContainer" containerID="e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.226896 4647 scope.go:117] "RemoveContainer" containerID="742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.242350 4647 scope.go:117] "RemoveContainer" containerID="f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.242942 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound 
desc = could not find container \"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711\": container with ID starting with f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711 not found: ID does not exist" containerID="f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.242999 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711"} err="failed to get container status \"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711\": rpc error: code = NotFound desc = could not find container \"f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711\": container with ID starting with f91f17f595849f72468ebc2117692186e31ebb640efd13963278245cd32d6711 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.243038 4647 scope.go:117] "RemoveContainer" containerID="e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.243711 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7\": container with ID starting with e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7 not found: ID does not exist" containerID="e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.243751 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7"} err="failed to get container status \"e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7\": rpc error: code = NotFound desc = could not find container \"e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7\": container with ID starting with e515514a7a81bc10b6e8ec8fc550c8f7d56c0e26e70e38b7347cc112486e04a7 not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.243780 4647 scope.go:117] "RemoveContainer" containerID="742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.244231 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe\": container with ID starting with 742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe not found: ID does not exist" containerID="742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.244255 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe"} err="failed to get container status \"742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe\": rpc error: code = NotFound desc = could not find container \"742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe\": container with ID starting with 742553c1a63dfea11a2eaf73f4616c4ce6f9e98a5cf797c3c0f22ddfa48a2bfe not found: ID does not exist" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.901801 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5"} Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.902741 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: E1128 15:28:52.902841 4647 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.174:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.903161 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.903342 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.903521 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.903685 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.906449 4647 generic.go:334] "Generic (PLEG): container finished" podID="25dcf47c-400f-4533-9338-0124305e046c" containerID="67d73e31a7bb5b67c63d0ec71a27f1c4b9dbdc423e1c5f2b6681c646553cca94" exitCode=0 Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.906545 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"25dcf47c-400f-4533-9338-0124305e046c","Type":"ContainerDied","Data":"67d73e31a7bb5b67c63d0ec71a27f1c4b9dbdc423e1c5f2b6681c646553cca94"} Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.907056 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc 
kubenswrapper[4647]: I1128 15:28:52.907357 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.907760 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.908065 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.908437 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.908844 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.911320 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:52 crc kubenswrapper[4647]: I1128 15:28:52.912026 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.315351 4647 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 15:28:53 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4" Netns:"/var/run/netns/bdd1d9da-2f9e-446b-887d-8f91659e231e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:53 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:53 crc kubenswrapper[4647]: > Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.315704 4647 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 15:28:53 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4" Netns:"/var/run/netns/bdd1d9da-2f9e-446b-887d-8f91659e231e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: 
[openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:53 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:53 crc kubenswrapper[4647]: > pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.315741 4647 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 15:28:53 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4" Netns:"/var/run/netns/bdd1d9da-2f9e-446b-887d-8f91659e231e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s": dial tcp 38.102.83.174:6443: connect: connection refused Nov 28 15:28:53 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:28:53 crc kubenswrapper[4647]: > pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.315846 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for 
\"marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-ql2cs_openshift-marketplace_5b489703-e3e6-4ef2-b993-766bd6e12094_0(5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4): error adding pod openshift-marketplace_marketplace-operator-79b997595-ql2cs to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4\\\" Netns:\\\"/var/run/netns/bdd1d9da-2f9e-446b-887d-8f91659e231e\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-ql2cs;K8S_POD_INFRA_CONTAINER_ID=5dc4bf9f0ab05f602101fccd445938ce81cae88d8ac6e18bc98fb715b0212ba4;K8S_POD_UID=5b489703-e3e6-4ef2-b993-766bd6e12094\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-ql2cs] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-ql2cs/5b489703-e3e6-4ef2-b993-766bd6e12094]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-ql2cs in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-ql2cs?timeout=1m0s\\\": dial tcp 38.102.83.174:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.797582 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.799157 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.800011 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.800769 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.801098 4647 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.801611 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.801921 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.802165 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.802379 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.872466 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.873627 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:28:53 crc kubenswrapper[4647]: 
I1128 15:28:53.873994 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.874639 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.874721 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.874757 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.922198 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.923051 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574" exitCode=0 Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.923496 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.924549 4647 scope.go:117] "RemoveContainer" containerID="6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24"
Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.927828 4647 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.174:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 15:28:53 crc kubenswrapper[4647]: E1128 15:28:53.935392 4647 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.174:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c3546b2646ac6 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC m=+261.626906459,LastTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC m=+261.626906459,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.950784 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.951366 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.951899 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.952397 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.952916 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.953370 4647 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.953575 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.958738 4647 scope.go:117] "RemoveContainer" containerID="13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421"
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.977280 4647 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.977355 4647 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:53 crc kubenswrapper[4647]: I1128 15:28:53.977384 4647 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.000900 4647 scope.go:117] "RemoveContainer" containerID="4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.015291 4647 scope.go:117] "RemoveContainer" containerID="0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.027355 4647 scope.go:117] "RemoveContainer" containerID="9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.038376 4647 scope.go:117] "RemoveContainer" containerID="bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.071317 4647 scope.go:117] "RemoveContainer" containerID="6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.072849 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\": container with ID starting with 6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24 not found: ID does not exist" containerID="6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.073059 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24"} err="failed to get container status \"6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\": rpc error: code = NotFound desc = could not find container \"6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24\": container with ID starting with 6e202e2d9cbca8aad58fe743dfd1442b7c9c82b592c98c3a2fa3a61d630a8d24 not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.073234 4647 scope.go:117] "RemoveContainer" containerID="13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.074285 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\": container with ID starting with 13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421 not found: ID does not exist" containerID="13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.074403 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421"} err="failed to get container status \"13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\": rpc error: code = NotFound desc = could not find container \"13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421\": container with ID starting with 13760a86fc74f8b5262922a4a14946771e51703e4e060105bcd9f83c32e10421 not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.074466 4647 scope.go:117] "RemoveContainer" containerID="4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.075593 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\": container with ID starting with 4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e not found: ID does not exist" containerID="4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.075678 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e"} err="failed to get container status \"4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\": rpc error: code = NotFound desc = could not find container \"4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e\": container with ID starting with 4e681fd077f804a13e80159ce919336c7178a93a097a5fa04ccd18c1bd9dca5e not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.075750 4647 scope.go:117] "RemoveContainer" containerID="0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.077158 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\": container with ID starting with 0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52 not found: ID does not exist" containerID="0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.077268 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52"} err="failed to get container status \"0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\": rpc error: code = NotFound desc = could not find container \"0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52\": container with ID starting with 0a9b5076d0411584f3daa983786abf8586e7210ad47bdaf8016b4853e12e1f52 not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.077448 4647 scope.go:117] "RemoveContainer" containerID="9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.079220 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\": container with ID starting with 9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574 not found: ID does not exist" containerID="9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.079253 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574"} err="failed to get container status \"9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\": rpc error: code = NotFound desc = could not find container \"9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574\": container with ID starting with 9a12afc037dbf93ec89d71c189a607177d3aab44eaa99a496b54e96d952ae574 not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.079274 4647 scope.go:117] "RemoveContainer" containerID="bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410"
Nov 28 15:28:54 crc kubenswrapper[4647]: E1128 15:28:54.079720 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\": container with ID starting with bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410 not found: ID does not exist" containerID="bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.079743 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410"} err="failed to get container status \"bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\": rpc error: code = NotFound desc = could not find container \"bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410\": container with ID starting with bd73135217eff01f3c5bdf5ad7af509d78ecc83080417ca57473b3098a31e410 not found: ID does not exist"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.286948 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.287808 4647 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.288378 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.288666 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.288869 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.289109 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.289315 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.289632 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.382014 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access\") pod \"25dcf47c-400f-4533-9338-0124305e046c\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") "
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.382116 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir\") pod \"25dcf47c-400f-4533-9338-0124305e046c\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") "
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.382150 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock\") pod \"25dcf47c-400f-4533-9338-0124305e046c\" (UID: \"25dcf47c-400f-4533-9338-0124305e046c\") "
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.382911 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "25dcf47c-400f-4533-9338-0124305e046c" (UID: "25dcf47c-400f-4533-9338-0124305e046c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.383004 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock" (OuterVolumeSpecName: "var-lock") pod "25dcf47c-400f-4533-9338-0124305e046c" (UID: "25dcf47c-400f-4533-9338-0124305e046c"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.384724 4647 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.384761 4647 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/25dcf47c-400f-4533-9338-0124305e046c-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.392641 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "25dcf47c-400f-4533-9338-0124305e046c" (UID: "25dcf47c-400f-4533-9338-0124305e046c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.406225 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.486449 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25dcf47c-400f-4533-9338-0124305e046c-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.955556 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"25dcf47c-400f-4533-9338-0124305e046c","Type":"ContainerDied","Data":"45d4278486d412e092d846990c6c519f7548dc7b7a471b7af6d160704a8360b1"}
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.955608 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45d4278486d412e092d846990c6c519f7548dc7b7a471b7af6d160704a8360b1"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.956120 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.966015 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.966773 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.967236 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.967589 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.968159 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:54 crc kubenswrapper[4647]: I1128 15:28:54.968865 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.965981 4647 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.967138 4647 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.967952 4647 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.968740 4647 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.969268 4647 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:28:59 crc kubenswrapper[4647]: I1128 15:28:59.969324 4647 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 28 15:28:59 crc kubenswrapper[4647]: E1128 15:28:59.969773 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="200ms"
Nov 28 15:29:00 crc kubenswrapper[4647]: E1128 15:29:00.171261 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="400ms"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.397991 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.399081 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.400387 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.401190 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.401615 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:00 crc kubenswrapper[4647]: I1128 15:29:00.401906 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:00 crc kubenswrapper[4647]: E1128 15:29:00.572498 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="800ms" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.374119 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="1.6s" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.622275 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:29:01Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:29:01Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:29:01Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:29:01Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:20434c856c20158a4c73986bf7de93188afa338ed356d293a59f9e621072cfc3\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:24f7dab5f4a6fcbb16d41b8a7345f9f9bae2ef1e2c53abed71c4f18eeafebc85\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1605131077},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:1ab7704f67839bb3705d0c80bea6f7197f233d472860c3005433c90d7786dd54\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:9c13035c7ccf9d13a21c9219d8d0d462fa2fdb4fe128d9724443784b1ed9a318\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1205801806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:485eae41e5a1129e031da03a9bc899702d16da22589d58a8e0c2910bc0226a23\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:86681c5c7f102911ba70f243ae7524f9a76939abbb50c93b1c80b70e07ccba62\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1195438934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\
"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:42da3a01b99987f17824a70b0ac9cde8d27a0ea39d325b9b7216ebdc5ba1f406\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:8932ced4defd2733d4740ea31dd7a6050447207c72233491a6ffdb06926137e7\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1122761533},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.622686 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.622890 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.623036 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.623202 4647 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" Nov 28 15:29:01 crc kubenswrapper[4647]: E1128 15:29:01.623225 4647 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:29:02 crc kubenswrapper[4647]: E1128 15:29:02.976340 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="3.2s" Nov 28 15:29:03 crc kubenswrapper[4647]: E1128 15:29:03.936213 4647 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.174:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c3546b2646ac6 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC m=+261.626906459,LastTimestamp:2025-11-28 15:28:51.779300038 +0000 UTC 
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.393727 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.394890 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.395646 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.396307 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.396999 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.397534 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.398050 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.416292 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.416338 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:05 crc kubenswrapper[4647]: E1128 15:29:05.416898 4647 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.174:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:05 crc kubenswrapper[4647]: I1128 15:29:05.417372 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:05 crc kubenswrapper[4647]: W1128 15:29:05.450071 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-f6db52440adc39f0fc9342902b378036a6811998bb926b7d9c40060103b495a8 WatchSource:0}: Error finding container f6db52440adc39f0fc9342902b378036a6811998bb926b7d9c40060103b495a8: Status 404 returned error can't find the container with id f6db52440adc39f0fc9342902b378036a6811998bb926b7d9c40060103b495a8
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.035716 4647 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="7228eb73b4782328ce5aca7c5bfeaabc1648861b8d6aa62392c5f48dd26ebe3f" exitCode=0
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.035850 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"7228eb73b4782328ce5aca7c5bfeaabc1648861b8d6aa62392c5f48dd26ebe3f"}
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.036154 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f6db52440adc39f0fc9342902b378036a6811998bb926b7d9c40060103b495a8"}
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.036694 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.036731 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.037255 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: E1128 15:29:06.037352 4647 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.174:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.037827 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.038333 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.038803 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.039163 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.039592 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.040892 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.040963 4647 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378" exitCode=1
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.041006 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378"}
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.041597 4647 scope.go:117] "RemoveContainer" containerID="cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.042023 4647 status_manager.go:851] "Failed to get status for pod" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" pod="openshift-marketplace/redhat-marketplace-gbwhg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-gbwhg\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.043010 4647 status_manager.go:851] "Failed to get status for pod" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" pod="openshift-marketplace/redhat-operators-bfrkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bfrkx\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.043731 4647 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.044189 4647 status_manager.go:851] "Failed to get status for pod" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" pod="openshift-marketplace/certified-operators-gwwpv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gwwpv\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.044697 4647 status_manager.go:851] "Failed to get status for pod" podUID="25dcf47c-400f-4533-9338-0124305e046c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.045284 4647 status_manager.go:851] "Failed to get status for pod" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" pod="openshift-marketplace/marketplace-operator-79b997595-5rsn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-5rsn5\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.045899 4647 status_manager.go:851] "Failed to get status for pod" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" pod="openshift-marketplace/community-operators-fxzbs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-fxzbs\": dial tcp 38.102.83.174:6443: connect: connection refused"
Nov 28 15:29:06 crc kubenswrapper[4647]: E1128 15:29:06.177705 4647 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.174:6443: connect: connection refused" interval="6.4s"
Nov 28 15:29:06 crc kubenswrapper[4647]: I1128 15:29:06.189810 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.059769 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.061106 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d87166ac21b81a19ae2bc3478bf176bb7400d35033f8ee4592cd545e01462d9e"}
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.065222 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"96f162779e71e0d5baadff7e6c021fdd0710355677c89b2d290adfd69d467c80"}
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.065472 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e770e3b9f5d206fcd3fded4b0df568b4fa3eb6b91aceeb93707f197e56466186"}
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.065543 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5328bbca526db83e43d63c7d93e083e9187aebef3fbf90e3a86405a51b512a08"}
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.065644 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"feb715adde1b9da233841b24b7796b06f1b41ee17e0915b12f558696eff44cc5"}
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.394223 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:29:07 crc kubenswrapper[4647]: I1128 15:29:07.394701 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs"
Nov 28 15:29:08 crc kubenswrapper[4647]: I1128 15:29:08.074005 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"9f7f1f92166096865d6a5898c853a515804c153249764538f75674a23e8b9ce6"}
Nov 28 15:29:08 crc kubenswrapper[4647]: I1128 15:29:08.074473 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:08 crc kubenswrapper[4647]: I1128 15:29:08.074512 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe"
Nov 28 15:29:10 crc kubenswrapper[4647]: I1128 15:29:10.418457 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:10 crc kubenswrapper[4647]: I1128 15:29:10.418859 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:10 crc kubenswrapper[4647]: I1128 15:29:10.428705 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:12 crc kubenswrapper[4647]: I1128 15:29:12.716751 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:29:13 crc kubenswrapper[4647]: I1128 15:29:13.089050 4647 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:29:13 crc kubenswrapper[4647]: I1128 15:29:13.109943 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" event={"ID":"5b489703-e3e6-4ef2-b993-766bd6e12094","Type":"ContainerStarted","Data":"f0ee253870d505437c25766aeb80ea8ce4f876f500a1b4fac7728cf4ebac3bf7"}
Nov 28 15:29:13 crc kubenswrapper[4647]: I1128 15:29:13.160704 4647 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9c8cfe49-a645-452d-a16d-99bcd476b94f"
Nov 28 15:29:13 crc kubenswrapper[4647]: I1128 15:29:13.592509 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" podUID="ecd537d5-ea50-48b2-8565-283566427e38" containerName="oauth-openshift" containerID="cri-o://b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8" gracePeriod=15
Nov 28 15:29:13 crc kubenswrapper[4647]: I1128 15:29:13.967817 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2"
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078341 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078378 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078436 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078466 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078487 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078507 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078544 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078578 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078597 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078643 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078686 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgzmg\" (UniqueName: \"kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078713 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078752 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.078778 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle\") pod \"ecd537d5-ea50-48b2-8565-283566427e38\" (UID: \"ecd537d5-ea50-48b2-8565-283566427e38\") "
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.079760 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.079896 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.080081 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.080622 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.083932 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.087400 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.087712 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.087988 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.088408 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.089117 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.089806 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.093810 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg" (OuterVolumeSpecName: "kube-api-access-jgzmg") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "kube-api-access-jgzmg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.095759 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.103840 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "ecd537d5-ea50-48b2-8565-283566427e38" (UID: "ecd537d5-ea50-48b2-8565-283566427e38"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.128284 4647 generic.go:334] "Generic (PLEG): container finished" podID="ecd537d5-ea50-48b2-8565-283566427e38" containerID="b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8" exitCode=0
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.128337 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" event={"ID":"ecd537d5-ea50-48b2-8565-283566427e38","Type":"ContainerDied","Data":"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8"}
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.128365 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" event={"ID":"ecd537d5-ea50-48b2-8565-283566427e38","Type":"ContainerDied","Data":"236e2769c52be860b97953b0332228879a9967a0035ea0bd20c056c43213150c"}
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.128382 4647 scope.go:117] "RemoveContainer" containerID="b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8"
Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.128509 4647 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-n7kp2" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.160346 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/0.log" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.160437 4647 generic.go:334] "Generic (PLEG): container finished" podID="5b489703-e3e6-4ef2-b993-766bd6e12094" containerID="8144b9d3d61521a1427fe24f1f186841475ab0ba732f6302ef993b97ab7937f9" exitCode=1 Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.160619 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" event={"ID":"5b489703-e3e6-4ef2-b993-766bd6e12094","Type":"ContainerDied","Data":"8144b9d3d61521a1427fe24f1f186841475ab0ba732f6302ef993b97ab7937f9"} Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.161706 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.161749 4647 scope.go:117] "RemoveContainer" containerID="8144b9d3d61521a1427fe24f1f186841475ab0ba732f6302ef993b97ab7937f9" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.162754 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.162781 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.168036 4647 scope.go:117] "RemoveContainer" containerID="b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8" Nov 28 15:29:14 crc kubenswrapper[4647]: E1128 15:29:14.168626 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8\": container with ID starting with b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8 not found: ID does not exist" containerID="b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.168683 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8"} err="failed to get container status \"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8\": rpc error: code = NotFound desc = could not find container \"b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8\": container with ID starting with b929d3e257963a0561800d80c521a0ab45cae36b7d9e7a91d887481b6fa5deb8 not found: ID does not exist" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.179529 4647 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9c8cfe49-a645-452d-a16d-99bcd476b94f" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180088 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc 
kubenswrapper[4647]: I1128 15:29:14.180150 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180217 4647 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180262 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180291 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180319 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180375 4647 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ecd537d5-ea50-48b2-8565-283566427e38-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180569 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgzmg\" (UniqueName: \"kubernetes.io/projected/ecd537d5-ea50-48b2-8565-283566427e38-kube-api-access-jgzmg\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180597 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180612 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180629 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180644 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 15:29:14.180658 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:14 crc kubenswrapper[4647]: I1128 
15:29:14.180673 4647 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ecd537d5-ea50-48b2-8565-283566427e38-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.170678 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/1.log" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.171929 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/0.log" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.172009 4647 generic.go:334] "Generic (PLEG): container finished" podID="5b489703-e3e6-4ef2-b993-766bd6e12094" containerID="2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a" exitCode=1 Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.172113 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" event={"ID":"5b489703-e3e6-4ef2-b993-766bd6e12094","Type":"ContainerDied","Data":"2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a"} Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.172180 4647 scope.go:117] "RemoveContainer" containerID="8144b9d3d61521a1427fe24f1f186841475ab0ba732f6302ef993b97ab7937f9" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.173404 4647 scope.go:117] "RemoveContainer" containerID="2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.175292 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.175328 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:15 crc kubenswrapper[4647]: E1128 15:29:15.180370 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\"" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.182494 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:29:15 crc kubenswrapper[4647]: I1128 15:29:15.213344 4647 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9c8cfe49-a645-452d-a16d-99bcd476b94f" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.184726 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/1.log" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.185585 4647 scope.go:117] "RemoveContainer" containerID="2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a" Nov 28 15:29:16 crc 
kubenswrapper[4647]: E1128 15:29:16.186044 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\"" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.186083 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.186169 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.189326 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.189824 4647 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.189918 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 15:29:16 crc kubenswrapper[4647]: I1128 15:29:16.208317 4647 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9c8cfe49-a645-452d-a16d-99bcd476b94f" Nov 28 15:29:21 crc kubenswrapper[4647]: I1128 15:29:21.311476 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:29:21 crc kubenswrapper[4647]: I1128 15:29:21.311907 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:29:21 crc kubenswrapper[4647]: I1128 15:29:21.312779 4647 scope.go:117] "RemoveContainer" containerID="2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a" Nov 28 15:29:21 crc kubenswrapper[4647]: E1128 15:29:21.313105 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-ql2cs_openshift-marketplace(5b489703-e3e6-4ef2-b993-766bd6e12094)\"" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.257002 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.512883 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager"/"serving-cert" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.700389 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.739297 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.871132 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:29:23 crc kubenswrapper[4647]: I1128 15:29:23.928296 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 15:29:24 crc kubenswrapper[4647]: I1128 15:29:24.198782 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 15:29:24 crc kubenswrapper[4647]: I1128 15:29:24.230735 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 15:29:24 crc kubenswrapper[4647]: I1128 15:29:24.486916 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 15:29:24 crc kubenswrapper[4647]: I1128 15:29:24.777980 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 15:29:24 crc kubenswrapper[4647]: I1128 15:29:24.803388 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.009354 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.075239 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.187710 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.192353 4647 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.459179 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.540829 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.552945 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.561169 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.577624 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.603195 4647 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.687123 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.746240 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.771489 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.773609 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.805751 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.849070 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 15:29:25 crc kubenswrapper[4647]: I1128 15:29:25.860062 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.190318 4647 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.190403 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.318256 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.359010 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.449776 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.468083 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.482586 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.537479 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.641256 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 15:29:26 crc kubenswrapper[4647]: 
I1128 15:29:26.790708 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.797908 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.959994 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.966212 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 15:29:26 crc kubenswrapper[4647]: I1128 15:29:26.974693 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.044979 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.086028 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.113593 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.146025 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.154964 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.160690 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.201398 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.251884 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.532522 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.650213 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.674785 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.778588 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.824658 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 15:29:27 crc kubenswrapper[4647]: I1128 15:29:27.939452 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 15:29:27 crc 
kubenswrapper[4647]: I1128 15:29:27.970879 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.025528 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.077903 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.118664 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.132021 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.249809 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.255819 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.269292 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.367142 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.378558 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.406875 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.459090 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.498355 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.541137 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.573703 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.577862 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.627754 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.657025 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.750371 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:29:28 crc 
kubenswrapper[4647]: I1128 15:29:28.855672 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.855682 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.933971 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 15:29:28 crc kubenswrapper[4647]: I1128 15:29:28.972777 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.013107 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.019152 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.120154 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.128312 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.141140 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.193485 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.193745 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.219301 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.273655 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.333809 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.378770 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.396944 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.431119 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.463286 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.477750 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress"/"router-certs-default" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.554908 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.641077 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.642257 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.696447 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 15:29:29 crc kubenswrapper[4647]: I1128 15:29:29.785047 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.300820 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.355839 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.414912 4647 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.525446 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.570343 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.593165 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.715769 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.771092 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.821193 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 15:29:30 crc kubenswrapper[4647]: I1128 15:29:30.844565 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.061749 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.527084 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.620679 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.622613 4647 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.805472 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.889765 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.947779 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.958854 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 15:29:31 crc kubenswrapper[4647]: I1128 15:29:31.982123 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.046863 4647 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052060 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbwhg","openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/marketplace-operator-79b997595-5rsn5","openshift-marketplace/redhat-operators-bfrkx","openshift-marketplace/certified-operators-gwwpv","openshift-authentication/oauth-openshift-558db77b4-n7kp2","openshift-marketplace/community-operators-fxzbs"] Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052141 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052164 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-57864598c6-bjbgx","openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052719 4647 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052759 4647 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b508f6cb-c167-4c71-8b7d-a6014ca9fafe" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052768 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052799 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052814 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052824 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052841 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052850 4647 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052862 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052871 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052884 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052893 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052908 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecd537d5-ea50-48b2-8565-283566427e38" containerName="oauth-openshift" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052917 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecd537d5-ea50-48b2-8565-283566427e38" containerName="oauth-openshift" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052929 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052938 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052949 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052958 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052972 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.052981 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.052992 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053001 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.053011 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053021 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="extract-utilities" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.053033 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053042 4647 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.053054 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25dcf47c-400f-4533-9338-0124305e046c" containerName="installer" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053061 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="25dcf47c-400f-4533-9338-0124305e046c" containerName="installer" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.053071 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053107 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: E1128 15:29:32.053121 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053129 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="extract-content" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053244 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="25dcf47c-400f-4533-9338-0124305e046c" containerName="installer" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053258 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecd537d5-ea50-48b2-8565-283566427e38" containerName="oauth-openshift" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053270 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053284 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" containerName="marketplace-operator" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053294 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053307 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053318 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" containerName="registry-server" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.053821 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql2cs"] Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.054180 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.054812 4647 scope.go:117] "RemoveContainer" containerID="2bbfd8d04971dacc23576bf1ca870a37407c2a2bd5e888c4e05ea32b348d7b8a" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.057604 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060514 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.058612 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.059244 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060002 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060093 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060131 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060187 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060218 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060304 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.060562 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.064796 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.065295 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.068993 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.070374 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.072481 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.084482 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 
15:29:32.088622 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095379 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095476 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-login\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095521 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-policies\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095580 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-service-ca\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095623 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-router-certs\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095654 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095703 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-session\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095740 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095781 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095815 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-error\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095847 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb4xn\" (UniqueName: \"kubernetes.io/projected/f2560be0-2ee8-49e0-baa2-ae3587b4617b-kube-api-access-bb4xn\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.095881 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.096009 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.096066 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-dir\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.134085 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.134062594 podStartE2EDuration="19.134062594s" podCreationTimestamp="2025-11-28 15:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:29:32.130854124 +0000 UTC m=+301.978460545" 
watchObservedRunningTime="2025-11-28 15:29:32.134062594 +0000 UTC m=+301.981669015" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198445 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198490 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198508 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-error\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb4xn\" (UniqueName: \"kubernetes.io/projected/f2560be0-2ee8-49e0-baa2-ae3587b4617b-kube-api-access-bb4xn\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198551 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198574 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-dir\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198614 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198633 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-login\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " 
pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198656 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-policies\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198680 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-service-ca\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198704 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-router-certs\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198728 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198755 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.198780 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-session\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.199171 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.199591 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-dir\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc 
kubenswrapper[4647]: I1128 15:29:32.200378 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.200855 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-audit-policies\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.202570 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-service-ca\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.208277 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-error\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.208403 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.213105 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-router-certs\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.213175 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-session\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.213791 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.214895 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.217590 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-template-login\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.222159 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb4xn\" (UniqueName: \"kubernetes.io/projected/f2560be0-2ee8-49e0-baa2-ae3587b4617b-kube-api-access-bb4xn\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.230292 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f2560be0-2ee8-49e0-baa2-ae3587b4617b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-57864598c6-bjbgx\" (UID: \"f2560be0-2ee8-49e0-baa2-ae3587b4617b\") " pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.262942 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.283946 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.296394 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/1.log" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.296943 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" event={"ID":"5b489703-e3e6-4ef2-b993-766bd6e12094","Type":"ContainerStarted","Data":"59703062f3d6f58b070cd0c31f6c1d12c791240d71b7bf0aa55f8c11ca911cb3"} Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.297838 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.299547 4647 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ql2cs container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.299737 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podUID="5b489703-e3e6-4ef2-b993-766bd6e12094" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 
10.217.0.56:8080: connect: connection refused" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.319262 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" podStartSLOduration=42.319244062 podStartE2EDuration="42.319244062s" podCreationTimestamp="2025-11-28 15:28:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:29:32.317619126 +0000 UTC m=+302.165225557" watchObservedRunningTime="2025-11-28 15:29:32.319244062 +0000 UTC m=+302.166850483" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.354517 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.377369 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.388629 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.396258 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.444582 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="019fb61c-0e19-4aa4-b042-970be7ad9119" path="/var/lib/kubelet/pods/019fb61c-0e19-4aa4-b042-970be7ad9119/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.445271 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dc48c84-53aa-4ee7-b192-0f7f4fcd0026" path="/var/lib/kubelet/pods/1dc48c84-53aa-4ee7-b192-0f7f4fcd0026/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.445836 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c861dff-9b7a-4c6c-b504-014c4afe4dc3" path="/var/lib/kubelet/pods/4c861dff-9b7a-4c6c-b504-014c4afe4dc3/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.446890 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dc7c86d-c2a4-4903-bafc-63f0184dc2c1" path="/var/lib/kubelet/pods/4dc7c86d-c2a4-4903-bafc-63f0184dc2c1/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.447349 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb" path="/var/lib/kubelet/pods/c50419a4-9ad3-4bf9-b2fd-a7a8c177e1cb/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.449162 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecd537d5-ea50-48b2-8565-283566427e38" path="/var/lib/kubelet/pods/ecd537d5-ea50-48b2-8565-283566427e38/volumes" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.602669 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.672162 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.757693 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.769064 4647 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.770270 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 15:29:32 crc kubenswrapper[4647]: I1128 15:29:32.890192 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.005475 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.150196 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.183295 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.236647 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.253219 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.276142 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.280681 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.303008 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ql2cs" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.402523 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.417558 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.434709 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.450537 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.504880 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.514563 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.591086 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.596766 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 
15:29:33.930274 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.968574 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.969099 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 15:29:33 crc kubenswrapper[4647]: I1128 15:29:33.976228 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.009985 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.018773 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.107457 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.107539 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.136444 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.213277 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.415186 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.440511 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.498162 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.622448 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.639575 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.666537 4647 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.708655 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.711370 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.739687 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 15:29:34 crc kubenswrapper[4647]: 
I1128 15:29:34.753512 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.753517 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.760440 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 15:29:34 crc kubenswrapper[4647]: I1128 15:29:34.978976 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.001149 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.054259 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.084290 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.116081 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.219525 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.233042 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.238130 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.270019 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.271514 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.293891 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: E1128 15:29:35.325357 4647 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 15:29:35 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-57864598c6-bjbgx_openshift-authentication_f2560be0-2ee8-49e0-baa2-ae3587b4617b_0(5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381): error adding pod openshift-authentication_oauth-openshift-57864598c6-bjbgx to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381" Netns:"/var/run/netns/f834ee06-08c9-4949-b5b8-5005775dd332" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57864598c6-bjbgx;K8S_POD_INFRA_CONTAINER_ID=5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381;K8S_POD_UID=f2560be0-2ee8-49e0-baa2-ae3587b4617b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-57864598c6-bjbgx] networking: Multus: [openshift-authentication/oauth-openshift-57864598c6-bjbgx/f2560be0-2ee8-49e0-baa2-ae3587b4617b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-57864598c6-bjbgx in out of cluster comm: pod "oauth-openshift-57864598c6-bjbgx" not found Nov 28 15:29:35 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:29:35 crc kubenswrapper[4647]: > Nov 28 15:29:35 crc kubenswrapper[4647]: E1128 15:29:35.325460 4647 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 15:29:35 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-57864598c6-bjbgx_openshift-authentication_f2560be0-2ee8-49e0-baa2-ae3587b4617b_0(5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381): error adding pod openshift-authentication_oauth-openshift-57864598c6-bjbgx to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381" Netns:"/var/run/netns/f834ee06-08c9-4949-b5b8-5005775dd332" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57864598c6-bjbgx;K8S_POD_INFRA_CONTAINER_ID=5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381;K8S_POD_UID=f2560be0-2ee8-49e0-baa2-ae3587b4617b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-57864598c6-bjbgx] networking: Multus: [openshift-authentication/oauth-openshift-57864598c6-bjbgx/f2560be0-2ee8-49e0-baa2-ae3587b4617b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-57864598c6-bjbgx in out of cluster comm: pod "oauth-openshift-57864598c6-bjbgx" not found Nov 28 15:29:35 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:29:35 crc kubenswrapper[4647]: > pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:35 crc kubenswrapper[4647]: E1128 15:29:35.325484 4647 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 15:29:35 crc kubenswrapper[4647]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_oauth-openshift-57864598c6-bjbgx_openshift-authentication_f2560be0-2ee8-49e0-baa2-ae3587b4617b_0(5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381): error adding pod openshift-authentication_oauth-openshift-57864598c6-bjbgx to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381" Netns:"/var/run/netns/f834ee06-08c9-4949-b5b8-5005775dd332" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57864598c6-bjbgx;K8S_POD_INFRA_CONTAINER_ID=5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381;K8S_POD_UID=f2560be0-2ee8-49e0-baa2-ae3587b4617b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-57864598c6-bjbgx] networking: Multus: [openshift-authentication/oauth-openshift-57864598c6-bjbgx/f2560be0-2ee8-49e0-baa2-ae3587b4617b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-57864598c6-bjbgx in out of cluster comm: pod "oauth-openshift-57864598c6-bjbgx" not found Nov 28 15:29:35 crc kubenswrapper[4647]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 15:29:35 crc kubenswrapper[4647]: > pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:35 crc kubenswrapper[4647]: E1128 15:29:35.325550 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-57864598c6-bjbgx_openshift-authentication(f2560be0-2ee8-49e0-baa2-ae3587b4617b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-57864598c6-bjbgx_openshift-authentication(f2560be0-2ee8-49e0-baa2-ae3587b4617b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-57864598c6-bjbgx_openshift-authentication_f2560be0-2ee8-49e0-baa2-ae3587b4617b_0(5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381): error adding pod openshift-authentication_oauth-openshift-57864598c6-bjbgx to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381\\\" Netns:\\\"/var/run/netns/f834ee06-08c9-4949-b5b8-5005775dd332\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-57864598c6-bjbgx;K8S_POD_INFRA_CONTAINER_ID=5e8457dd0bdd8374f962e672fb34b27cc009f4e7a4d98b3451a95d6f3326f381;K8S_POD_UID=f2560be0-2ee8-49e0-baa2-ae3587b4617b\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-57864598c6-bjbgx] networking: Multus: [openshift-authentication/oauth-openshift-57864598c6-bjbgx/f2560be0-2ee8-49e0-baa2-ae3587b4617b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-57864598c6-bjbgx in out of cluster comm: pod \\\"oauth-openshift-57864598c6-bjbgx\\\" 
not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" podUID="f2560be0-2ee8-49e0-baa2-ae3587b4617b" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.366721 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.435442 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.464727 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.465566 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.503769 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.537274 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.633110 4647 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.633556 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5" gracePeriod=5 Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.686855 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.697541 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.798868 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 15:29:35 crc kubenswrapper[4647]: I1128 15:29:35.965714 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.013535 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.059250 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.074385 4647 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.089306 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.109694 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.129838 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.189387 4647 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.189497 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.189570 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.190452 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"d87166ac21b81a19ae2bc3478bf176bb7400d35033f8ee4592cd545e01462d9e"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.190653 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://d87166ac21b81a19ae2bc3478bf176bb7400d35033f8ee4592cd545e01462d9e" gracePeriod=30 Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.331816 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.365928 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.438102 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.542258 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.561598 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.992975 4647 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:29:36 crc kubenswrapper[4647]: I1128 15:29:36.996515 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.015917 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.317295 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.364291 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.480916 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.611108 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.648276 4647 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.666313 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.684432 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.719152 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.815262 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.861947 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.875198 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.911958 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.916479 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 15:29:37 crc kubenswrapper[4647]: I1128 15:29:37.919772 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.148739 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.365406 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.453775 4647 reflector.go:368] Caches populated for *v1.RuntimeClass from 
k8s.io/client-go/informers/factory.go:160 Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.513783 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.606793 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.768959 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.820450 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.823681 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.866304 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 15:29:38 crc kubenswrapper[4647]: I1128 15:29:38.893504 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 15:29:39 crc kubenswrapper[4647]: I1128 15:29:39.366347 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 15:29:39 crc kubenswrapper[4647]: I1128 15:29:39.772068 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 15:29:39 crc kubenswrapper[4647]: I1128 15:29:39.884260 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 15:29:40 crc kubenswrapper[4647]: I1128 15:29:40.194311 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 15:29:40 crc kubenswrapper[4647]: I1128 15:29:40.761294 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.238595 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.238697 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.349772 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.349859 4647 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5" exitCode=137 Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.349926 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.349931 4647 scope.go:117] "RemoveContainer" containerID="c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.370852 4647 scope.go:117] "RemoveContainer" containerID="c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5" Nov 28 15:29:41 crc kubenswrapper[4647]: E1128 15:29:41.371523 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5\": container with ID starting with c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5 not found: ID does not exist" containerID="c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.371593 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5"} err="failed to get container status \"c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5\": rpc error: code = NotFound desc = could not find container \"c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5\": container with ID starting with c72fd1c0d73fa32474d5fd8134f36220321a72e32b137ee8861face58b23c2d5 not found: ID does not exist" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.384805 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.384856 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.384924 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385002 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385058 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385075 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385103 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385123 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385400 4647 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385443 4647 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385454 4647 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.385505 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.398377 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.487456 4647 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:41 crc kubenswrapper[4647]: I1128 15:29:41.487506 4647 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:29:42 crc kubenswrapper[4647]: I1128 15:29:42.400272 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 15:29:50 crc kubenswrapper[4647]: I1128 15:29:50.394482 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:50 crc kubenswrapper[4647]: I1128 15:29:50.400764 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:50 crc kubenswrapper[4647]: I1128 15:29:50.631257 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-57864598c6-bjbgx"] Nov 28 15:29:50 crc kubenswrapper[4647]: W1128 15:29:50.645164 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2560be0_2ee8_49e0_baa2_ae3587b4617b.slice/crio-679e43dfd0b9957d585f8b61c0456003d344291c0a7412f19f2c7d281d3bb5f0 WatchSource:0}: Error finding container 679e43dfd0b9957d585f8b61c0456003d344291c0a7412f19f2c7d281d3bb5f0: Status 404 returned error can't find the container with id 679e43dfd0b9957d585f8b61c0456003d344291c0a7412f19f2c7d281d3bb5f0 Nov 28 15:29:51 crc kubenswrapper[4647]: I1128 15:29:51.407063 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" event={"ID":"f2560be0-2ee8-49e0-baa2-ae3587b4617b","Type":"ContainerStarted","Data":"dcabac1ed5c719c67bf382a651449dba4d9d47c296549e3affc082202775a58f"} Nov 28 15:29:51 crc kubenswrapper[4647]: I1128 15:29:51.407131 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" event={"ID":"f2560be0-2ee8-49e0-baa2-ae3587b4617b","Type":"ContainerStarted","Data":"679e43dfd0b9957d585f8b61c0456003d344291c0a7412f19f2c7d281d3bb5f0"} Nov 28 15:29:51 crc kubenswrapper[4647]: I1128 15:29:51.407619 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:29:51 crc kubenswrapper[4647]: I1128 15:29:51.429403 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" podStartSLOduration=63.429375774 podStartE2EDuration="1m3.429375774s" podCreationTimestamp="2025-11-28 15:28:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:29:51.424207139 +0000 UTC m=+321.271813550" watchObservedRunningTime="2025-11-28 15:29:51.429375774 +0000 UTC m=+321.276982205" Nov 28 15:29:51 crc kubenswrapper[4647]: I1128 15:29:51.559951 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-57864598c6-bjbgx" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.089975 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9"] Nov 28 15:30:01 crc kubenswrapper[4647]: E1128 15:30:01.090717 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.090729 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.090813 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.091206 4647 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.094911 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.094918 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.125168 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9"] Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.214304 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.214352 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.214549 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfbzd\" (UniqueName: \"kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.316263 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfbzd\" (UniqueName: \"kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.316350 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.316386 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.317467 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume\") 
pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.323049 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.341878 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfbzd\" (UniqueName: \"kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd\") pod \"collect-profiles-29405730-m64h9\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.409836 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:01 crc kubenswrapper[4647]: I1128 15:30:01.615788 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9"] Nov 28 15:30:02 crc kubenswrapper[4647]: I1128 15:30:02.162324 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 15:30:02 crc kubenswrapper[4647]: I1128 15:30:02.475282 4647 generic.go:334] "Generic (PLEG): container finished" podID="7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" containerID="e0eae7dde42b8513288dfd8ae43953fc04795f89bf2dd645321a9c387ddf22dc" exitCode=0 Nov 28 15:30:02 crc kubenswrapper[4647]: I1128 15:30:02.475330 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" event={"ID":"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb","Type":"ContainerDied","Data":"e0eae7dde42b8513288dfd8ae43953fc04795f89bf2dd645321a9c387ddf22dc"} Nov 28 15:30:02 crc kubenswrapper[4647]: I1128 15:30:02.475359 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" event={"ID":"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb","Type":"ContainerStarted","Data":"19d731974d00c802d8c384b2057dfefb63f260db25d3b30cf8e9faa54eeaf47b"} Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.706066 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.846656 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume\") pod \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.846760 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume\") pod \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.846851 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfbzd\" (UniqueName: \"kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd\") pod \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\" (UID: \"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb\") " Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.848034 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume" (OuterVolumeSpecName: "config-volume") pod "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" (UID: "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.854055 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" (UID: "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.854578 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd" (OuterVolumeSpecName: "kube-api-access-zfbzd") pod "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" (UID: "7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb"). InnerVolumeSpecName "kube-api-access-zfbzd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.948957 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.949007 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4647]: I1128 15:30:03.949020 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfbzd\" (UniqueName: \"kubernetes.io/projected/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb-kube-api-access-zfbzd\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:04 crc kubenswrapper[4647]: I1128 15:30:04.488453 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" event={"ID":"7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb","Type":"ContainerDied","Data":"19d731974d00c802d8c384b2057dfefb63f260db25d3b30cf8e9faa54eeaf47b"} Nov 28 15:30:04 crc kubenswrapper[4647]: I1128 15:30:04.488842 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19d731974d00c802d8c384b2057dfefb63f260db25d3b30cf8e9faa54eeaf47b" Nov 28 15:30:04 crc kubenswrapper[4647]: I1128 15:30:04.488606 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9" Nov 28 15:30:06 crc kubenswrapper[4647]: I1128 15:30:06.503404 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 28 15:30:06 crc kubenswrapper[4647]: I1128 15:30:06.506001 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 15:30:06 crc kubenswrapper[4647]: I1128 15:30:06.506071 4647 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="d87166ac21b81a19ae2bc3478bf176bb7400d35033f8ee4592cd545e01462d9e" exitCode=137 Nov 28 15:30:06 crc kubenswrapper[4647]: I1128 15:30:06.506114 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"d87166ac21b81a19ae2bc3478bf176bb7400d35033f8ee4592cd545e01462d9e"} Nov 28 15:30:06 crc kubenswrapper[4647]: I1128 15:30:06.506168 4647 scope.go:117] "RemoveContainer" containerID="cebb9e374fd0e2caf9cc3dae88a05c660d4ad6268ff3be94f7b11916996e6378" Nov 28 15:30:07 crc kubenswrapper[4647]: I1128 15:30:07.515009 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 28 15:30:07 crc kubenswrapper[4647]: I1128 15:30:07.518300 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2931b935c26b15be374c187969142915be4e9a7b27232d29ed44b6b7f545e47f"} Nov 28 15:30:12 crc kubenswrapper[4647]: I1128 
15:30:12.717243 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:30:16 crc kubenswrapper[4647]: I1128 15:30:16.189585 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:30:16 crc kubenswrapper[4647]: I1128 15:30:16.194403 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:30:17 crc kubenswrapper[4647]: I1128 15:30:17.023373 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:30:17 crc kubenswrapper[4647]: I1128 15:30:17.023546 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:30:22 crc kubenswrapper[4647]: I1128 15:30:22.721320 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.823286 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sbztr"] Nov 28 15:30:25 crc kubenswrapper[4647]: E1128 15:30:25.825251 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" containerName="collect-profiles" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.825351 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" containerName="collect-profiles" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.825588 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" containerName="collect-profiles" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.826532 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.831084 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.849104 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sbztr"] Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.976402 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-catalog-content\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.976836 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-utilities\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:25 crc kubenswrapper[4647]: I1128 15:30:25.976990 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9v5r\" (UniqueName: \"kubernetes.io/projected/b124cbca-8cc0-4bc6-9870-fa348da63a06-kube-api-access-r9v5r\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.014010 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5dk2w"] Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.015530 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.017792 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.030247 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dk2w"] Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078462 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-catalog-content\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078548 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-catalog-content\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078585 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-utilities\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078612 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-utilities\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078669 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9v5r\" (UniqueName: \"kubernetes.io/projected/b124cbca-8cc0-4bc6-9870-fa348da63a06-kube-api-access-r9v5r\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.078720 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94p9b\" (UniqueName: \"kubernetes.io/projected/f7fc41cd-7d94-436d-9c27-bf868f6b7212-kube-api-access-94p9b\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.079472 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-catalog-content\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.079745 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b124cbca-8cc0-4bc6-9870-fa348da63a06-utilities\") pod \"certified-operators-sbztr\" (UID: 
\"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.107447 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9v5r\" (UniqueName: \"kubernetes.io/projected/b124cbca-8cc0-4bc6-9870-fa348da63a06-kube-api-access-r9v5r\") pod \"certified-operators-sbztr\" (UID: \"b124cbca-8cc0-4bc6-9870-fa348da63a06\") " pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.155548 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sbztr" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.180710 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-catalog-content\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.180831 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-utilities\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.180960 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94p9b\" (UniqueName: \"kubernetes.io/projected/f7fc41cd-7d94-436d-9c27-bf868f6b7212-kube-api-access-94p9b\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.182308 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-utilities\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.182523 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7fc41cd-7d94-436d-9c27-bf868f6b7212-catalog-content\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.207061 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94p9b\" (UniqueName: \"kubernetes.io/projected/f7fc41cd-7d94-436d-9c27-bf868f6b7212-kube-api-access-94p9b\") pod \"community-operators-5dk2w\" (UID: \"f7fc41cd-7d94-436d-9c27-bf868f6b7212\") " pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.375711 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dk2w" Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.446345 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sbztr"] Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.596863 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dk2w"] Nov 28 15:30:26 crc kubenswrapper[4647]: W1128 15:30:26.608163 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7fc41cd_7d94_436d_9c27_bf868f6b7212.slice/crio-1ee0b3a395caf8a515bda63675eeaa28c74fe8ce14768019a6750f80efd4ef59 WatchSource:0}: Error finding container 1ee0b3a395caf8a515bda63675eeaa28c74fe8ce14768019a6750f80efd4ef59: Status 404 returned error can't find the container with id 1ee0b3a395caf8a515bda63675eeaa28c74fe8ce14768019a6750f80efd4ef59 Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.693734 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dk2w" event={"ID":"f7fc41cd-7d94-436d-9c27-bf868f6b7212","Type":"ContainerStarted","Data":"1ee0b3a395caf8a515bda63675eeaa28c74fe8ce14768019a6750f80efd4ef59"} Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.696191 4647 generic.go:334] "Generic (PLEG): container finished" podID="b124cbca-8cc0-4bc6-9870-fa348da63a06" containerID="79c227cb2626dbb1c7a32de897bbc696ac98892f3cca2ba8e1f5c466f4c68d71" exitCode=0 Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.696266 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbztr" event={"ID":"b124cbca-8cc0-4bc6-9870-fa348da63a06","Type":"ContainerDied","Data":"79c227cb2626dbb1c7a32de897bbc696ac98892f3cca2ba8e1f5c466f4c68d71"} Nov 28 15:30:26 crc kubenswrapper[4647]: I1128 15:30:26.696299 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbztr" event={"ID":"b124cbca-8cc0-4bc6-9870-fa348da63a06","Type":"ContainerStarted","Data":"dd6c839407dff4012fab98289c64d73c24c87765f7f15c897831824cf8b76499"} Nov 28 15:30:27 crc kubenswrapper[4647]: I1128 15:30:27.725203 4647 generic.go:334] "Generic (PLEG): container finished" podID="f7fc41cd-7d94-436d-9c27-bf868f6b7212" containerID="4cb30c64c7231111ff60d87c9c2c50172afe5157fa94225092cfce1b2b085d85" exitCode=0 Nov 28 15:30:27 crc kubenswrapper[4647]: I1128 15:30:27.725283 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dk2w" event={"ID":"f7fc41cd-7d94-436d-9c27-bf868f6b7212","Type":"ContainerDied","Data":"4cb30c64c7231111ff60d87c9c2c50172afe5157fa94225092cfce1b2b085d85"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.089764 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.090374 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" podUID="0c3edb05-7e83-4753-bb17-23dc077830c4" containerName="route-controller-manager" containerID="cri-o://dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018" gracePeriod=30 Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.102517 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.102842 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerName="controller-manager" containerID="cri-o://3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8" gracePeriod=30 Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.232977 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.234068 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.248567 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.267629 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.318538 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.318662 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.318770 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlng2\" (UniqueName: \"kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.420665 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.421215 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.421399 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlng2\" (UniqueName: \"kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " 
pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.421876 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.422229 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.422758 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8s5cf"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.423915 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.428381 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.443236 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8s5cf"] Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.455814 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlng2\" (UniqueName: \"kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2\") pod \"redhat-marketplace-rgvlr\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.524755 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-catalog-content\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.524862 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-utilities\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.524916 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dwcz\" (UniqueName: \"kubernetes.io/projected/e3671234-ff46-40ec-95b4-c5dd9192ed13-kube-api-access-9dwcz\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.585035 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.613404 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.615882 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.627239 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-catalog-content\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.627727 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-utilities\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.627767 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dwcz\" (UniqueName: \"kubernetes.io/projected/e3671234-ff46-40ec-95b4-c5dd9192ed13-kube-api-access-9dwcz\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.627859 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-catalog-content\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.628354 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3671234-ff46-40ec-95b4-c5dd9192ed13-utilities\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.649279 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dwcz\" (UniqueName: \"kubernetes.io/projected/e3671234-ff46-40ec-95b4-c5dd9192ed13-kube-api-access-9dwcz\") pod \"redhat-operators-8s5cf\" (UID: \"e3671234-ff46-40ec-95b4-c5dd9192ed13\") " pod="openshift-marketplace/redhat-operators-8s5cf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.728943 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert\") pod \"0c3edb05-7e83-4753-bb17-23dc077830c4\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.728994 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvqrw\" (UniqueName: \"kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw\") pod \"6b633443-1559-45ea-84d7-41ac090ad0a9\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729037 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles\") pod \"6b633443-1559-45ea-84d7-41ac090ad0a9\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729131 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca\") pod \"6b633443-1559-45ea-84d7-41ac090ad0a9\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729159 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config\") pod \"0c3edb05-7e83-4753-bb17-23dc077830c4\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729197 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert\") pod \"6b633443-1559-45ea-84d7-41ac090ad0a9\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729219 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2lk2\" (UniqueName: \"kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2\") pod \"0c3edb05-7e83-4753-bb17-23dc077830c4\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729242 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca\") pod \"0c3edb05-7e83-4753-bb17-23dc077830c4\" (UID: \"0c3edb05-7e83-4753-bb17-23dc077830c4\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.729284 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config\") pod \"6b633443-1559-45ea-84d7-41ac090ad0a9\" (UID: \"6b633443-1559-45ea-84d7-41ac090ad0a9\") " Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.730446 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca" (OuterVolumeSpecName: "client-ca") pod "6b633443-1559-45ea-84d7-41ac090ad0a9" (UID: "6b633443-1559-45ea-84d7-41ac090ad0a9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.730954 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca" (OuterVolumeSpecName: "client-ca") pod "0c3edb05-7e83-4753-bb17-23dc077830c4" (UID: "0c3edb05-7e83-4753-bb17-23dc077830c4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.730948 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config" (OuterVolumeSpecName: "config") pod "0c3edb05-7e83-4753-bb17-23dc077830c4" (UID: "0c3edb05-7e83-4753-bb17-23dc077830c4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.731006 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6b633443-1559-45ea-84d7-41ac090ad0a9" (UID: "6b633443-1559-45ea-84d7-41ac090ad0a9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.731315 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config" (OuterVolumeSpecName: "config") pod "6b633443-1559-45ea-84d7-41ac090ad0a9" (UID: "6b633443-1559-45ea-84d7-41ac090ad0a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.734879 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2" (OuterVolumeSpecName: "kube-api-access-r2lk2") pod "0c3edb05-7e83-4753-bb17-23dc077830c4" (UID: "0c3edb05-7e83-4753-bb17-23dc077830c4"). InnerVolumeSpecName "kube-api-access-r2lk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.736204 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw" (OuterVolumeSpecName: "kube-api-access-kvqrw") pod "6b633443-1559-45ea-84d7-41ac090ad0a9" (UID: "6b633443-1559-45ea-84d7-41ac090ad0a9"). InnerVolumeSpecName "kube-api-access-kvqrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.745024 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0c3edb05-7e83-4753-bb17-23dc077830c4" (UID: "0c3edb05-7e83-4753-bb17-23dc077830c4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.745098 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6b633443-1559-45ea-84d7-41ac090ad0a9" (UID: "6b633443-1559-45ea-84d7-41ac090ad0a9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.766299 4647 generic.go:334] "Generic (PLEG): container finished" podID="0c3edb05-7e83-4753-bb17-23dc077830c4" containerID="dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018" exitCode=0 Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.766508 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.767443 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" event={"ID":"0c3edb05-7e83-4753-bb17-23dc077830c4","Type":"ContainerDied","Data":"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.767503 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf" event={"ID":"0c3edb05-7e83-4753-bb17-23dc077830c4","Type":"ContainerDied","Data":"24bb61e46a03446cb6e1e0be765dd7bb48266cefa3c76ba21fe9e56a6b078f79"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.767522 4647 scope.go:117] "RemoveContainer" containerID="dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.775092 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dk2w" event={"ID":"f7fc41cd-7d94-436d-9c27-bf868f6b7212","Type":"ContainerStarted","Data":"a08f3a6cd3dd21f7ab10ab6d83f89ca8875b940e12508c0961183d085e9749fc"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.779372 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerID="3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8" exitCode=0 Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.779486 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" event={"ID":"6b633443-1559-45ea-84d7-41ac090ad0a9","Type":"ContainerDied","Data":"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.779536 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" event={"ID":"6b633443-1559-45ea-84d7-41ac090ad0a9","Type":"ContainerDied","Data":"30157d87f6b3d6381dfa461ecadf06f746b26bc4ab9fb1b30d637bdf8fbcd22f"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.779579 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-vcv8n" Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.784162 4647 generic.go:334] "Generic (PLEG): container finished" podID="b124cbca-8cc0-4bc6-9870-fa348da63a06" containerID="668666ff617155ae18ef168856ebd375a63c7f4e1d028e386992d18fbe74b23d" exitCode=0 Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.784202 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbztr" event={"ID":"b124cbca-8cc0-4bc6-9870-fa348da63a06","Type":"ContainerDied","Data":"668666ff617155ae18ef168856ebd375a63c7f4e1d028e386992d18fbe74b23d"} Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.802188 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8s5cf"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.828151 4647 scope.go:117] "RemoveContainer" containerID="dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830744 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c3edb05-7e83-4753-bb17-23dc077830c4-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830774 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvqrw\" (UniqueName: \"kubernetes.io/projected/6b633443-1559-45ea-84d7-41ac090ad0a9-kube-api-access-kvqrw\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830786 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830797 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830811 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830819 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b633443-1559-45ea-84d7-41ac090ad0a9-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830828 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2lk2\" (UniqueName: \"kubernetes.io/projected/0c3edb05-7e83-4753-bb17-23dc077830c4-kube-api-access-r2lk2\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: E1128 15:30:28.830822 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018\": container with ID starting with dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018 not found: ID does not exist" containerID="dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830866 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018"} err="failed to get container status \"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018\": rpc error: code = NotFound desc = could not find container \"dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018\": container with ID starting with dc35a4bf647091d742b109ff22417c2b31a6a066f642cc437bbe6a0349584018 not found: ID does not exist"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830899 4647 scope.go:117] "RemoveContainer" containerID="3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.830835 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c3edb05-7e83-4753-bb17-23dc077830c4-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.831048 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b633443-1559-45ea-84d7-41ac090ad0a9-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.843049 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"]
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.852233 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-vcv8n"]
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.857688 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"]
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.858586 4647 scope.go:117] "RemoveContainer" containerID="3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"
Nov 28 15:30:28 crc kubenswrapper[4647]: E1128 15:30:28.859318 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8\": container with ID starting with 3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8 not found: ID does not exist" containerID="3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.859377 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8"} err="failed to get container status \"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8\": rpc error: code = NotFound desc = could not find container \"3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8\": container with ID starting with 3cf856b65936d0438d79edfe3765de112667c0a66491d39c8ae933cf269da0f8 not found: ID does not exist"
Nov 28 15:30:28 crc kubenswrapper[4647]: I1128 15:30:28.861334 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-kd9bf"]
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.037722 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8s5cf"]
Nov 28 15:30:29 crc kubenswrapper[4647]: W1128 15:30:29.046074 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3671234_ff46_40ec_95b4_c5dd9192ed13.slice/crio-7b1f52afb6ffc71a5022b411202ddcb28567b9c6127e35b9e30dce7d38d3b111 WatchSource:0}: Error finding container 7b1f52afb6ffc71a5022b411202ddcb28567b9c6127e35b9e30dce7d38d3b111: Status 404 returned error can't find the container with id 7b1f52afb6ffc71a5022b411202ddcb28567b9c6127e35b9e30dce7d38d3b111
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.098120 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"]
Nov 28 15:30:29 crc kubenswrapper[4647]: W1128 15:30:29.105122 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbde6f92_55d8_482f_9dac_9b40fbae6c53.slice/crio-37e87fbd3809d60cce4708e235a676c995b71b44c0579188fcb5cc568d21e9ce WatchSource:0}: Error finding container 37e87fbd3809d60cce4708e235a676c995b71b44c0579188fcb5cc568d21e9ce: Status 404 returned error can't find the container with id 37e87fbd3809d60cce4708e235a676c995b71b44c0579188fcb5cc568d21e9ce
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.657126 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:30:29 crc kubenswrapper[4647]: E1128 15:30:29.657524 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c3edb05-7e83-4753-bb17-23dc077830c4" containerName="route-controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.657549 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3edb05-7e83-4753-bb17-23dc077830c4" containerName="route-controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: E1128 15:30:29.657579 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerName="controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.657590 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerName="controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.657764 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" containerName="controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.657779 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c3edb05-7e83-4753-bb17-23dc077830c4" containerName="route-controller-manager"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.658459 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.660960 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.661953 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.662242 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.662534 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.664802 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.665055 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.665099 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.665202 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.665336 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.667567 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.669833 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.669868 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.670055 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.670059 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.671812 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.682787 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.734128 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.745886 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.745945 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.745966 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.745983 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.746004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.746231 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7ltx\" (UniqueName: \"kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.746302 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.746364 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkdqk\" (UniqueName: \"kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.746394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.796278 4647 generic.go:334] "Generic (PLEG): container finished" podID="f7fc41cd-7d94-436d-9c27-bf868f6b7212" containerID="a08f3a6cd3dd21f7ab10ab6d83f89ca8875b940e12508c0961183d085e9749fc" exitCode=0 Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.796380 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dk2w" 
event={"ID":"f7fc41cd-7d94-436d-9c27-bf868f6b7212","Type":"ContainerDied","Data":"a08f3a6cd3dd21f7ab10ab6d83f89ca8875b940e12508c0961183d085e9749fc"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.799784 4647 generic.go:334] "Generic (PLEG): container finished" podID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerID="d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968" exitCode=0 Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.799883 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerDied","Data":"d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.799921 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerStarted","Data":"37e87fbd3809d60cce4708e235a676c995b71b44c0579188fcb5cc568d21e9ce"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.809492 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbztr" event={"ID":"b124cbca-8cc0-4bc6-9870-fa348da63a06","Type":"ContainerStarted","Data":"a92c71e55168e95a93b07170a52308bbb8f8632918a892f4bade1d6ec26df205"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.811833 4647 generic.go:334] "Generic (PLEG): container finished" podID="e3671234-ff46-40ec-95b4-c5dd9192ed13" containerID="cd6d9e62e48948b5b547670599a696fbb4359c1e211df6ab8b049d6f7c4f92d0" exitCode=0 Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.811910 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8s5cf" event={"ID":"e3671234-ff46-40ec-95b4-c5dd9192ed13","Type":"ContainerDied","Data":"cd6d9e62e48948b5b547670599a696fbb4359c1e211df6ab8b049d6f7c4f92d0"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.812071 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8s5cf" event={"ID":"e3671234-ff46-40ec-95b4-c5dd9192ed13","Type":"ContainerStarted","Data":"7b1f52afb6ffc71a5022b411202ddcb28567b9c6127e35b9e30dce7d38d3b111"} Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847323 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847496 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 
15:30:29.847525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847584 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847638 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7ltx\" (UniqueName: \"kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847675 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847700 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkdqk\" (UniqueName: \"kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.847731 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.848734 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.849052 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.849473 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.850261 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.851150 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.860774 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.873911 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.874715 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7ltx\" (UniqueName: \"kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx\") pod \"route-controller-manager-5dcdbd9666-chkz4\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") " pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.890023 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkdqk\" (UniqueName: \"kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk\") pod \"controller-manager-fb864b4d-gz7gz\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") " pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.893398 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sbztr" podStartSLOduration=2.177319724 podStartE2EDuration="4.893378335s" podCreationTimestamp="2025-11-28 15:30:25 +0000 UTC" firstStartedPulling="2025-11-28 15:30:26.697618833 +0000 UTC m=+356.545225254" lastFinishedPulling="2025-11-28 15:30:29.413677444 +0000 UTC m=+359.261283865" observedRunningTime="2025-11-28 15:30:29.86797775 +0000 UTC m=+359.715584191" watchObservedRunningTime="2025-11-28 15:30:29.893378335 +0000 UTC m=+359.740984756" Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.978375 4647 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:30:29 crc kubenswrapper[4647]: I1128 15:30:29.989223 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.287134 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.339466 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:30:30 crc kubenswrapper[4647]: W1128 15:30:30.350387 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0632e64f_f7f4_4ddc_98e4_27458ab5b113.slice/crio-e6d9c1ca53bf503209b6c75d9d1d2cb60157804ded7e91bc3c5e252a6bca115b WatchSource:0}: Error finding container e6d9c1ca53bf503209b6c75d9d1d2cb60157804ded7e91bc3c5e252a6bca115b: Status 404 returned error can't find the container with id e6d9c1ca53bf503209b6c75d9d1d2cb60157804ded7e91bc3c5e252a6bca115b
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.411850 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c3edb05-7e83-4753-bb17-23dc077830c4" path="/var/lib/kubelet/pods/0c3edb05-7e83-4753-bb17-23dc077830c4/volumes"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.412384 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b633443-1559-45ea-84d7-41ac090ad0a9" path="/var/lib/kubelet/pods/6b633443-1559-45ea-84d7-41ac090ad0a9/volumes"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.822517 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" event={"ID":"0632e64f-f7f4-4ddc-98e4-27458ab5b113","Type":"ContainerStarted","Data":"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.822839 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" event={"ID":"0632e64f-f7f4-4ddc-98e4-27458ab5b113","Type":"ContainerStarted","Data":"e6d9c1ca53bf503209b6c75d9d1d2cb60157804ded7e91bc3c5e252a6bca115b"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.823458 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.826549 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerStarted","Data":"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.829023 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8s5cf" event={"ID":"e3671234-ff46-40ec-95b4-c5dd9192ed13","Type":"ContainerStarted","Data":"2ee1ddf3278e70b3f977211b1a7da881c961718ecca4acdd0f6fa71e71f4e421"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.831042 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" event={"ID":"79be06cc-c9db-48af-906e-c57c1233a598","Type":"ContainerStarted","Data":"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.831134 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" event={"ID":"79be06cc-c9db-48af-906e-c57c1233a598","Type":"ContainerStarted","Data":"35b0c6475fe2fb244ae03af8a6714541584e9b27ccf6de056dc8311a8a75288f"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.831596 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.834884 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dk2w" event={"ID":"f7fc41cd-7d94-436d-9c27-bf868f6b7212","Type":"ContainerStarted","Data":"f75a9efc5014dc1ae87ba592f45a09263108d9377afe1363899089495a74e361"}
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.838078 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.851212 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" podStartSLOduration=2.851189814 podStartE2EDuration="2.851189814s" podCreationTimestamp="2025-11-28 15:30:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:30:30.849072727 +0000 UTC m=+360.696679148" watchObservedRunningTime="2025-11-28 15:30:30.851189814 +0000 UTC m=+360.698796235"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.914861 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5dk2w" podStartSLOduration=3.28292413 podStartE2EDuration="5.914837611s" podCreationTimestamp="2025-11-28 15:30:25 +0000 UTC" firstStartedPulling="2025-11-28 15:30:27.73244164 +0000 UTC m=+357.580048061" lastFinishedPulling="2025-11-28 15:30:30.364355131 +0000 UTC m=+360.211961542" observedRunningTime="2025-11-28 15:30:30.909582859 +0000 UTC m=+360.757189280" watchObservedRunningTime="2025-11-28 15:30:30.914837611 +0000 UTC m=+360.762444042"
Nov 28 15:30:30 crc kubenswrapper[4647]: I1128 15:30:30.933646 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" podStartSLOduration=2.9336264979999997 podStartE2EDuration="2.933626498s" podCreationTimestamp="2025-11-28 15:30:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:30:30.930802422 +0000 UTC m=+360.778408843" watchObservedRunningTime="2025-11-28 15:30:30.933626498 +0000 UTC m=+360.781232919"
Nov 28 15:30:31 crc kubenswrapper[4647]: I1128 15:30:31.145633 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:30:31 crc kubenswrapper[4647]: I1128 15:30:31.845070 4647 generic.go:334] "Generic (PLEG): container finished" podID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerID="118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae" exitCode=0
Nov 28 15:30:31 crc kubenswrapper[4647]: I1128 15:30:31.845164 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerDied","Data":"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae"}
Nov 28 15:30:31 crc kubenswrapper[4647]: I1128 15:30:31.847126 4647 generic.go:334] "Generic (PLEG): container finished" podID="e3671234-ff46-40ec-95b4-c5dd9192ed13" containerID="2ee1ddf3278e70b3f977211b1a7da881c961718ecca4acdd0f6fa71e71f4e421" exitCode=0
Nov 28 15:30:31 crc kubenswrapper[4647]: I1128 15:30:31.847176 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8s5cf" event={"ID":"e3671234-ff46-40ec-95b4-c5dd9192ed13","Type":"ContainerDied","Data":"2ee1ddf3278e70b3f977211b1a7da881c961718ecca4acdd0f6fa71e71f4e421"}
Nov 28 15:30:32 crc kubenswrapper[4647]: I1128 15:30:32.854273 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8s5cf" event={"ID":"e3671234-ff46-40ec-95b4-c5dd9192ed13","Type":"ContainerStarted","Data":"cd9d588785266ae50f4922fa52479ea2a57f15d3b3ba2d27876644fd9a20b741"}
Nov 28 15:30:32 crc kubenswrapper[4647]: I1128 15:30:32.858333 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerStarted","Data":"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624"}
Nov 28 15:30:32 crc kubenswrapper[4647]: I1128 15:30:32.876006 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8s5cf" podStartSLOduration=2.14155789 podStartE2EDuration="4.875989816s" podCreationTimestamp="2025-11-28 15:30:28 +0000 UTC" firstStartedPulling="2025-11-28 15:30:29.813350276 +0000 UTC m=+359.660956697" lastFinishedPulling="2025-11-28 15:30:32.547782212 +0000 UTC m=+362.395388623" observedRunningTime="2025-11-28 15:30:32.872083111 +0000 UTC m=+362.719689532" watchObservedRunningTime="2025-11-28 15:30:32.875989816 +0000 UTC m=+362.723596237"
Nov 28 15:30:32 crc kubenswrapper[4647]: I1128 15:30:32.902061 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rgvlr" podStartSLOduration=2.385804639 podStartE2EDuration="4.902042149s" podCreationTimestamp="2025-11-28 15:30:28 +0000 UTC" firstStartedPulling="2025-11-28 15:30:29.804719813 +0000 UTC m=+359.652326234" lastFinishedPulling="2025-11-28 15:30:32.320957333 +0000 UTC m=+362.168563744" observedRunningTime="2025-11-28 15:30:32.895848522 +0000 UTC m=+362.743454943" watchObservedRunningTime="2025-11-28 15:30:32.902042149 +0000 UTC m=+362.749648570"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.156615 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sbztr"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.159605 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sbztr"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.209535 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sbztr"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.376361 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5dk2w"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.377566 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5dk2w"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.447121 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5dk2w"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.922879 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5dk2w"
Nov 28 15:30:36 crc kubenswrapper[4647]: I1128 15:30:36.925226 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sbztr"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.616640 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rgvlr"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.616699 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rgvlr"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.662234 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rgvlr"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.802911 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8s5cf"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.803007 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8s5cf"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.861353 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8s5cf"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.958214 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8s5cf"
Nov 28 15:30:38 crc kubenswrapper[4647]: I1128 15:30:38.962602 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rgvlr"
Nov 28 15:30:47 crc kubenswrapper[4647]: I1128 15:30:47.023464 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:30:47 crc kubenswrapper[4647]: I1128 15:30:47.024282 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.734134 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-df99x"]
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.735340 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.780980 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-df99x"]
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926500 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9xxj\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-kube-api-access-x9xxj\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926572 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1fc9c8db-939e-4db7-a921-ce217545a601-installation-pull-secrets\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926639 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-trusted-ca\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926681 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-registry-certificates\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926713 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926736 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-bound-sa-token\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926768 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1fc9c8db-939e-4db7-a921-ce217545a601-ca-trust-extracted\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.926798 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-registry-tls\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:48 crc kubenswrapper[4647]: I1128 15:30:48.960165 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028198 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9xxj\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-kube-api-access-x9xxj\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028249 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1fc9c8db-939e-4db7-a921-ce217545a601-installation-pull-secrets\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028302 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-trusted-ca\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028333 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-registry-certificates\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028366 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-bound-sa-token\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028387 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1fc9c8db-939e-4db7-a921-ce217545a601-ca-trust-extracted\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.028427 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-registry-tls\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.029442 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1fc9c8db-939e-4db7-a921-ce217545a601-ca-trust-extracted\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.030857 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-registry-certificates\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.031100 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1fc9c8db-939e-4db7-a921-ce217545a601-trusted-ca\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.035648 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-registry-tls\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.039030 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1fc9c8db-939e-4db7-a921-ce217545a601-installation-pull-secrets\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.046240 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9xxj\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-kube-api-access-x9xxj\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.050026 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1fc9c8db-939e-4db7-a921-ce217545a601-bound-sa-token\") pod \"image-registry-66df7c8f76-df99x\" (UID: \"1fc9c8db-939e-4db7-a921-ce217545a601\") " pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.090037 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.541288 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-df99x"]
Nov 28 15:30:49 crc kubenswrapper[4647]: I1128 15:30:49.962101 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-df99x" event={"ID":"1fc9c8db-939e-4db7-a921-ce217545a601","Type":"ContainerStarted","Data":"40fc3ca2311fadf023616bfbb36bdd3da661eb799fa99a176eb5bb8b8d76c0a2"}
Nov 28 15:30:50 crc kubenswrapper[4647]: I1128 15:30:50.975077 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-df99x" event={"ID":"1fc9c8db-939e-4db7-a921-ce217545a601","Type":"ContainerStarted","Data":"ba29116bf6c1fe2363cba8904bfe53f186444de7b62cbd763cc934d4c5d3c2f7"}
Nov 28 15:30:50 crc kubenswrapper[4647]: I1128 15:30:50.975685 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:31:09 crc kubenswrapper[4647]: I1128 15:31:09.104674 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-df99x"
Nov 28 15:31:09 crc kubenswrapper[4647]: I1128 15:31:09.148130 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-df99x" podStartSLOduration=21.148106368 podStartE2EDuration="21.148106368s" podCreationTimestamp="2025-11-28 15:30:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:30:50.999709538 +0000 UTC m=+380.847315959" watchObservedRunningTime="2025-11-28 15:31:09.148106368 +0000 UTC m=+398.995712819"
Nov 28 15:31:09 crc kubenswrapper[4647]: I1128 15:31:09.190264 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"]
Nov 28 15:31:10 crc kubenswrapper[4647]: I1128 15:31:10.091438 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:31:10 crc kubenswrapper[4647]: I1128 15:31:10.092025 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" podUID="79be06cc-c9db-48af-906e-c57c1233a598" containerName="controller-manager" containerID="cri-o://788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe" gracePeriod=30
Nov 28 15:31:10 crc kubenswrapper[4647]: I1128 15:31:10.188425 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:31:10 crc kubenswrapper[4647]: I1128 15:31:10.188684 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" podUID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" containerName="route-controller-manager" containerID="cri-o://1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993" gracePeriod=30
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.602055 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.614282 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.661935 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert\") pod \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662006 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca\") pod \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662023 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") pod \"79be06cc-c9db-48af-906e-c57c1233a598\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662056 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles\") pod \"79be06cc-c9db-48af-906e-c57c1233a598\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662090 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config\") pod \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662151 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkdqk\" (UniqueName: \"kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk\") pod \"79be06cc-c9db-48af-906e-c57c1233a598\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662188 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert\") pod \"79be06cc-c9db-48af-906e-c57c1233a598\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662216 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7ltx\" (UniqueName: \"kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx\") pod \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\" (UID: \"0632e64f-f7f4-4ddc-98e4-27458ab5b113\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.662258 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config\") pod \"79be06cc-c9db-48af-906e-c57c1233a598\" (UID: \"79be06cc-c9db-48af-906e-c57c1233a598\") "
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.663766 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config" (OuterVolumeSpecName: "config") pod "79be06cc-c9db-48af-906e-c57c1233a598" (UID: "79be06cc-c9db-48af-906e-c57c1233a598"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.667325 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca" (OuterVolumeSpecName: "client-ca") pod "79be06cc-c9db-48af-906e-c57c1233a598" (UID: "79be06cc-c9db-48af-906e-c57c1233a598"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.667956 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "79be06cc-c9db-48af-906e-c57c1233a598" (UID: "79be06cc-c9db-48af-906e-c57c1233a598"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.668017 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca" (OuterVolumeSpecName: "client-ca") pod "0632e64f-f7f4-4ddc-98e4-27458ab5b113" (UID: "0632e64f-f7f4-4ddc-98e4-27458ab5b113"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.669766 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config" (OuterVolumeSpecName: "config") pod "0632e64f-f7f4-4ddc-98e4-27458ab5b113" (UID: "0632e64f-f7f4-4ddc-98e4-27458ab5b113"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.674653 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "79be06cc-c9db-48af-906e-c57c1233a598" (UID: "79be06cc-c9db-48af-906e-c57c1233a598"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.674854 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx" (OuterVolumeSpecName: "kube-api-access-w7ltx") pod "0632e64f-f7f4-4ddc-98e4-27458ab5b113" (UID: "0632e64f-f7f4-4ddc-98e4-27458ab5b113"). InnerVolumeSpecName "kube-api-access-w7ltx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.676721 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk" (OuterVolumeSpecName: "kube-api-access-pkdqk") pod "79be06cc-c9db-48af-906e-c57c1233a598" (UID: "79be06cc-c9db-48af-906e-c57c1233a598"). InnerVolumeSpecName "kube-api-access-pkdqk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.680848 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0632e64f-f7f4-4ddc-98e4-27458ab5b113" (UID: "0632e64f-f7f4-4ddc-98e4-27458ab5b113"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763675 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkdqk\" (UniqueName: \"kubernetes.io/projected/79be06cc-c9db-48af-906e-c57c1233a598-kube-api-access-pkdqk\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763709 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79be06cc-c9db-48af-906e-c57c1233a598-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763721 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7ltx\" (UniqueName: \"kubernetes.io/projected/0632e64f-f7f4-4ddc-98e4-27458ab5b113-kube-api-access-w7ltx\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763733 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763743 4647 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0632e64f-f7f4-4ddc-98e4-27458ab5b113-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763752 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763759 4647 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763767 4647 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/79be06cc-c9db-48af-906e-c57c1233a598-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:10.763775 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0632e64f-f7f4-4ddc-98e4-27458ab5b113-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.111329 4647 generic.go:334] "Generic (PLEG): container finished" podID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" containerID="1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993" exitCode=0
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.111461 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.111457 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" event={"ID":"0632e64f-f7f4-4ddc-98e4-27458ab5b113","Type":"ContainerDied","Data":"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"}
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.111603 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4" event={"ID":"0632e64f-f7f4-4ddc-98e4-27458ab5b113","Type":"ContainerDied","Data":"e6d9c1ca53bf503209b6c75d9d1d2cb60157804ded7e91bc3c5e252a6bca115b"}
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.111636 4647 scope.go:117] "RemoveContainer" containerID="1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.114386 4647 generic.go:334] "Generic (PLEG): container finished" podID="79be06cc-c9db-48af-906e-c57c1233a598" containerID="788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe" exitCode=0
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.114602 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" event={"ID":"79be06cc-c9db-48af-906e-c57c1233a598","Type":"ContainerDied","Data":"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"}
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.114675 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz" event={"ID":"79be06cc-c9db-48af-906e-c57c1233a598","Type":"ContainerDied","Data":"35b0c6475fe2fb244ae03af8a6714541584e9b27ccf6de056dc8311a8a75288f"}
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.114779 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-gz7gz"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.142514 4647 scope.go:117] "RemoveContainer" containerID="1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"
Nov 28 15:31:11 crc kubenswrapper[4647]: E1128 15:31:11.144094 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993\": container with ID starting with 1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993 not found: ID does not exist" containerID="1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.144147 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993"} err="failed to get container status \"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993\": rpc error: code = NotFound desc = could not find container \"1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993\": container with ID starting with 1472019bf8aeb369ad9002949e36a4e184dea19efac56d47207371278f8eb993 not found: ID does not exist"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.144185 4647 scope.go:117] "RemoveContainer" containerID="788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.153791 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.179869 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5dcdbd9666-chkz4"]
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.189871 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.189981 4647 scope.go:117] "RemoveContainer" containerID="788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"
Nov 28 15:31:11 crc kubenswrapper[4647]: E1128 15:31:11.193943 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe\": container with ID starting with 788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe not found: ID does not exist" containerID="788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.194034 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe"} err="failed to get container status \"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe\": rpc error: code = NotFound desc = could not find container \"788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe\": container with ID starting with 788bef7d99051d5412b66d5babbc08c68722aee3d104ac046ead48cb93c67fbe not found: ID does not exist"
Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.217498 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-gz7gz"]
Nov 28 15:31:11 crc 
kubenswrapper[4647]: I1128 15:31:11.692151 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-595c689659-fj7dx"] Nov 28 15:31:11 crc kubenswrapper[4647]: E1128 15:31:11.692523 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79be06cc-c9db-48af-906e-c57c1233a598" containerName="controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.692546 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="79be06cc-c9db-48af-906e-c57c1233a598" containerName="controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: E1128 15:31:11.692560 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" containerName="route-controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.692568 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" containerName="route-controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.692679 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="79be06cc-c9db-48af-906e-c57c1233a598" containerName="controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.692701 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" containerName="route-controller-manager" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.693351 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.697015 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.697250 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.697614 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb"] Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.697384 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.700357 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.700546 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.700639 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.700698 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.703954 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-595c689659-fj7dx"] Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708388 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.709490 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708501 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708381 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb"] Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708548 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708583 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708595 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.708657 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.791896 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-client-ca\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.791961 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crbzh\" (UniqueName: \"kubernetes.io/projected/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-kube-api-access-crbzh\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-config\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792226 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-config\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " 
pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792306 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-proxy-ca-bundles\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792351 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e47b3563-2344-43bd-8671-16ef9461b8eb-serving-cert\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792376 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx54s\" (UniqueName: \"kubernetes.io/projected/e47b3563-2344-43bd-8671-16ef9461b8eb-kube-api-access-sx54s\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792464 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-client-ca\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.792508 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-serving-cert\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893647 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-client-ca\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893757 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crbzh\" (UniqueName: \"kubernetes.io/projected/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-kube-api-access-crbzh\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893795 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-config\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " 
pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893835 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-config\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893867 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e47b3563-2344-43bd-8671-16ef9461b8eb-serving-cert\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893889 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-proxy-ca-bundles\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893910 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx54s\" (UniqueName: \"kubernetes.io/projected/e47b3563-2344-43bd-8671-16ef9461b8eb-kube-api-access-sx54s\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893941 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-client-ca\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.893966 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-serving-cert\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.894721 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-client-ca\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.895173 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-proxy-ca-bundles\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.895321 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-client-ca\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.896043 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-config\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.896063 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e47b3563-2344-43bd-8671-16ef9461b8eb-config\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.901307 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-serving-cert\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.903985 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e47b3563-2344-43bd-8671-16ef9461b8eb-serving-cert\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.917129 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crbzh\" (UniqueName: \"kubernetes.io/projected/133a0f2b-7411-47ba-bbb7-a2d8733d5ff0-kube-api-access-crbzh\") pod \"controller-manager-595c689659-fj7dx\" (UID: \"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0\") " pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:11 crc kubenswrapper[4647]: I1128 15:31:11.920297 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx54s\" (UniqueName: \"kubernetes.io/projected/e47b3563-2344-43bd-8671-16ef9461b8eb-kube-api-access-sx54s\") pod \"route-controller-manager-5cb4cf48b4-694cb\" (UID: \"e47b3563-2344-43bd-8671-16ef9461b8eb\") " pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.034168 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.041154 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.317783 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-595c689659-fj7dx"] Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.403499 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0632e64f-f7f4-4ddc-98e4-27458ab5b113" path="/var/lib/kubelet/pods/0632e64f-f7f4-4ddc-98e4-27458ab5b113/volumes" Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.404532 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79be06cc-c9db-48af-906e-c57c1233a598" path="/var/lib/kubelet/pods/79be06cc-c9db-48af-906e-c57c1233a598/volumes" Nov 28 15:31:12 crc kubenswrapper[4647]: I1128 15:31:12.455235 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb"] Nov 28 15:31:12 crc kubenswrapper[4647]: W1128 15:31:12.459951 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode47b3563_2344_43bd_8671_16ef9461b8eb.slice/crio-201caf67af87188e9f34b143d2c1fe198710a7c30bda92d967cd806b3e887318 WatchSource:0}: Error finding container 201caf67af87188e9f34b143d2c1fe198710a7c30bda92d967cd806b3e887318: Status 404 returned error can't find the container with id 201caf67af87188e9f34b143d2c1fe198710a7c30bda92d967cd806b3e887318 Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.132738 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" event={"ID":"e47b3563-2344-43bd-8671-16ef9461b8eb","Type":"ContainerStarted","Data":"f3f502706fefe980da6552b463ef9b07db795a8de58108a6a008a800af892e7c"} Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.133304 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" event={"ID":"e47b3563-2344-43bd-8671-16ef9461b8eb","Type":"ContainerStarted","Data":"201caf67af87188e9f34b143d2c1fe198710a7c30bda92d967cd806b3e887318"} Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.135514 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.138743 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" event={"ID":"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0","Type":"ContainerStarted","Data":"3cd016f4abde9fed3999751fef3be14179dec4ee5fdf48dfca311b1b6f8e2975"} Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.138799 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" event={"ID":"133a0f2b-7411-47ba-bbb7-a2d8733d5ff0","Type":"ContainerStarted","Data":"110ac60ef4c7c7f472f167f1be42aac45df0c835053fb58fa2167f1f679fca07"} Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.139111 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.143992 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" 
Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.144951 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.154033 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5cb4cf48b4-694cb" podStartSLOduration=3.154013907 podStartE2EDuration="3.154013907s" podCreationTimestamp="2025-11-28 15:31:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:31:13.1533837 +0000 UTC m=+403.000990121" watchObservedRunningTime="2025-11-28 15:31:13.154013907 +0000 UTC m=+403.001620318" Nov 28 15:31:13 crc kubenswrapper[4647]: I1128 15:31:13.212020 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-595c689659-fj7dx" podStartSLOduration=3.2120013 podStartE2EDuration="3.2120013s" podCreationTimestamp="2025-11-28 15:31:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:31:13.177257899 +0000 UTC m=+403.024864320" watchObservedRunningTime="2025-11-28 15:31:13.2120013 +0000 UTC m=+403.059607721" Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.022955 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.023596 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.023649 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.024318 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.024370 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16" gracePeriod=600 Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.181510 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16" exitCode=0 Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.181568 4647 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16"} Nov 28 15:31:17 crc kubenswrapper[4647]: I1128 15:31:17.181982 4647 scope.go:117] "RemoveContainer" containerID="8c7d4d0788c4caca10e56c807ba36469185900c2bc683a88ab15f6c3523455b4" Nov 28 15:31:18 crc kubenswrapper[4647]: I1128 15:31:18.193258 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14"} Nov 28 15:31:34 crc kubenswrapper[4647]: I1128 15:31:34.239631 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" podUID="6ab07d56-17ed-4c33-a43f-181e5ab30502" containerName="registry" containerID="cri-o://1190b6b58b034861d5481989e37d34be8baec7616c8c95c85462a880b669753a" gracePeriod=30 Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.013705 4647 generic.go:334] "Generic (PLEG): container finished" podID="6ab07d56-17ed-4c33-a43f-181e5ab30502" containerID="1190b6b58b034861d5481989e37d34be8baec7616c8c95c85462a880b669753a" exitCode=0 Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.013750 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" event={"ID":"6ab07d56-17ed-4c33-a43f-181e5ab30502","Type":"ContainerDied","Data":"1190b6b58b034861d5481989e37d34be8baec7616c8c95c85462a880b669753a"} Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.302397 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.417879 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.417973 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb6xf\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421157 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421225 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421269 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421325 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421349 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.421508 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"6ab07d56-17ed-4c33-a43f-181e5ab30502\" (UID: \"6ab07d56-17ed-4c33-a43f-181e5ab30502\") " Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.422724 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.423395 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.427705 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.427933 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf" (OuterVolumeSpecName: "kube-api-access-xb6xf") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "kube-api-access-xb6xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.429040 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.430930 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.438166 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.451863 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "6ab07d56-17ed-4c33-a43f-181e5ab30502" (UID: "6ab07d56-17ed-4c33-a43f-181e5ab30502"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.533529 4647 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.533990 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ab07d56-17ed-4c33-a43f-181e5ab30502-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.534118 4647 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6ab07d56-17ed-4c33-a43f-181e5ab30502-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.534217 4647 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.534358 4647 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6ab07d56-17ed-4c33-a43f-181e5ab30502-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.534525 4647 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:35 crc kubenswrapper[4647]: I1128 15:31:35.534631 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb6xf\" (UniqueName: \"kubernetes.io/projected/6ab07d56-17ed-4c33-a43f-181e5ab30502-kube-api-access-xb6xf\") on node \"crc\" DevicePath \"\"" Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.021721 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" event={"ID":"6ab07d56-17ed-4c33-a43f-181e5ab30502","Type":"ContainerDied","Data":"dea3637b3132403e676ef88703c4214bf74702c6f014e973eadbc1b60b6232f2"} Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.021806 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-76zr7" Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.022187 4647 scope.go:117] "RemoveContainer" containerID="1190b6b58b034861d5481989e37d34be8baec7616c8c95c85462a880b669753a" Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.066904 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"] Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.074484 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-76zr7"] Nov 28 15:31:36 crc kubenswrapper[4647]: I1128 15:31:36.410187 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ab07d56-17ed-4c33-a43f-181e5ab30502" path="/var/lib/kubelet/pods/6ab07d56-17ed-4c33-a43f-181e5ab30502/volumes" Nov 28 15:33:17 crc kubenswrapper[4647]: I1128 15:33:17.022791 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:33:17 crc kubenswrapper[4647]: I1128 15:33:17.023757 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:33:47 crc kubenswrapper[4647]: I1128 15:33:47.023115 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:33:47 crc kubenswrapper[4647]: I1128 15:33:47.023862 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.023183 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.024080 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.024172 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.026148 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.026214 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14" gracePeriod=600 Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.177397 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14" exitCode=0 Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.177475 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14"} Nov 28 15:34:17 crc kubenswrapper[4647]: I1128 15:34:17.177753 4647 scope.go:117] "RemoveContainer" containerID="069845d01aad9e7b2035bd90b8d33350ac2a115f79a2e8bca3a165495ab5bb16" Nov 28 15:34:18 crc kubenswrapper[4647]: I1128 15:34:18.189543 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19"} Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.942068 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-6dlfq"] Nov 28 15:34:48 crc kubenswrapper[4647]: E1128 15:34:48.942804 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ab07d56-17ed-4c33-a43f-181e5ab30502" containerName="registry" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.942831 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ab07d56-17ed-4c33-a43f-181e5ab30502" containerName="registry" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.942950 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ab07d56-17ed-4c33-a43f-181e5ab30502" containerName="registry" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.943372 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.947888 4647 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-rx6p2" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.948260 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.948341 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.951371 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-s96fs"] Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.952197 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-s96fs" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.957503 4647 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-j62q5" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.976235 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-s96fs"] Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.994954 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"] Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.995615 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" Nov 28 15:34:48 crc kubenswrapper[4647]: I1128 15:34:48.998652 4647 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nj5x5" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.011028 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"] Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.016624 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-6dlfq"] Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.030816 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7djx\" (UniqueName: \"kubernetes.io/projected/24afbb10-6dd7-4492-9340-287a2b45d450-kube-api-access-j7djx\") pod \"cert-manager-webhook-5655c58dd6-7qjfk\" (UID: \"24afbb10-6dd7-4492-9340-287a2b45d450\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.030860 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcwt2\" (UniqueName: \"kubernetes.io/projected/7e8c6733-5ed1-46be-b533-e3b03a586fd5-kube-api-access-fcwt2\") pod \"cert-manager-5b446d88c5-s96fs\" (UID: \"7e8c6733-5ed1-46be-b533-e3b03a586fd5\") " pod="cert-manager/cert-manager-5b446d88c5-s96fs" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.030913 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9npt2\" (UniqueName: \"kubernetes.io/projected/15cf6e95-03c8-49db-9029-2fd5f51e14c1-kube-api-access-9npt2\") pod \"cert-manager-cainjector-7f985d654d-6dlfq\" (UID: \"15cf6e95-03c8-49db-9029-2fd5f51e14c1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.131851 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7djx\" (UniqueName: \"kubernetes.io/projected/24afbb10-6dd7-4492-9340-287a2b45d450-kube-api-access-j7djx\") pod \"cert-manager-webhook-5655c58dd6-7qjfk\" (UID: \"24afbb10-6dd7-4492-9340-287a2b45d450\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.131899 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcwt2\" (UniqueName: \"kubernetes.io/projected/7e8c6733-5ed1-46be-b533-e3b03a586fd5-kube-api-access-fcwt2\") pod \"cert-manager-5b446d88c5-s96fs\" (UID: \"7e8c6733-5ed1-46be-b533-e3b03a586fd5\") " pod="cert-manager/cert-manager-5b446d88c5-s96fs" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.131929 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9npt2\" (UniqueName: \"kubernetes.io/projected/15cf6e95-03c8-49db-9029-2fd5f51e14c1-kube-api-access-9npt2\") pod \"cert-manager-cainjector-7f985d654d-6dlfq\" (UID: \"15cf6e95-03c8-49db-9029-2fd5f51e14c1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.151820 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9npt2\" (UniqueName: \"kubernetes.io/projected/15cf6e95-03c8-49db-9029-2fd5f51e14c1-kube-api-access-9npt2\") pod \"cert-manager-cainjector-7f985d654d-6dlfq\" (UID: \"15cf6e95-03c8-49db-9029-2fd5f51e14c1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.152851 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcwt2\" (UniqueName: \"kubernetes.io/projected/7e8c6733-5ed1-46be-b533-e3b03a586fd5-kube-api-access-fcwt2\") pod \"cert-manager-5b446d88c5-s96fs\" (UID: \"7e8c6733-5ed1-46be-b533-e3b03a586fd5\") " pod="cert-manager/cert-manager-5b446d88c5-s96fs" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.154528 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7djx\" (UniqueName: \"kubernetes.io/projected/24afbb10-6dd7-4492-9340-287a2b45d450-kube-api-access-j7djx\") pod \"cert-manager-webhook-5655c58dd6-7qjfk\" (UID: \"24afbb10-6dd7-4492-9340-287a2b45d450\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.262096 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.269914 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-s96fs" Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.307546 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"
Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.569867 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-s96fs"]
Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.575256 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.599478 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-6dlfq"]
Nov 28 15:34:49 crc kubenswrapper[4647]: W1128 15:34:49.607229 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15cf6e95_03c8_49db_9029_2fd5f51e14c1.slice/crio-f34305a5620ad0dede1cf4944b3ef654afab927e718c689ca13aaff7f2e361fa WatchSource:0}: Error finding container f34305a5620ad0dede1cf4944b3ef654afab927e718c689ca13aaff7f2e361fa: Status 404 returned error can't find the container with id f34305a5620ad0dede1cf4944b3ef654afab927e718c689ca13aaff7f2e361fa
Nov 28 15:34:49 crc kubenswrapper[4647]: I1128 15:34:49.658242 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"]
Nov 28 15:34:49 crc kubenswrapper[4647]: W1128 15:34:49.663896 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24afbb10_6dd7_4492_9340_287a2b45d450.slice/crio-48019010c156ebee4e51cada81efb3bf9ee5aa2ca57328f6ad32e0b883bea996 WatchSource:0}: Error finding container 48019010c156ebee4e51cada81efb3bf9ee5aa2ca57328f6ad32e0b883bea996: Status 404 returned error can't find the container with id 48019010c156ebee4e51cada81efb3bf9ee5aa2ca57328f6ad32e0b883bea996
Nov 28 15:34:50 crc kubenswrapper[4647]: I1128 15:34:50.467980 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" event={"ID":"24afbb10-6dd7-4492-9340-287a2b45d450","Type":"ContainerStarted","Data":"48019010c156ebee4e51cada81efb3bf9ee5aa2ca57328f6ad32e0b883bea996"}
Nov 28 15:34:50 crc kubenswrapper[4647]: I1128 15:34:50.468019 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-s96fs" event={"ID":"7e8c6733-5ed1-46be-b533-e3b03a586fd5","Type":"ContainerStarted","Data":"5c1c3d013dd663ba423d1e582b73dbc7e398484c9ee8ad69452464f6ab589f0c"}
Nov 28 15:34:50 crc kubenswrapper[4647]: I1128 15:34:50.468029 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" event={"ID":"15cf6e95-03c8-49db-9029-2fd5f51e14c1","Type":"ContainerStarted","Data":"f34305a5620ad0dede1cf4944b3ef654afab927e718c689ca13aaff7f2e361fa"}
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.486513 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" event={"ID":"24afbb10-6dd7-4492-9340-287a2b45d450","Type":"ContainerStarted","Data":"f8e02b4aef51850eaae2247cbff53718d62906fff1cb8256e0a38a622d04639e"}
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.487107 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.489452 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-s96fs" event={"ID":"7e8c6733-5ed1-46be-b533-e3b03a586fd5","Type":"ContainerStarted","Data":"328b87b9af24445af42913b59c0e9bb636ed83d68c2ad9daba50a3f8b4ed9504"}
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.490958 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" event={"ID":"15cf6e95-03c8-49db-9029-2fd5f51e14c1","Type":"ContainerStarted","Data":"0985f545b4dac7c32a1000b2d57e415af83fdfd8904c51d0b6ea6e1e4bfce7ac"}
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.513951 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk" podStartSLOduration=2.860136101 podStartE2EDuration="6.513932912s" podCreationTimestamp="2025-11-28 15:34:48 +0000 UTC" firstStartedPulling="2025-11-28 15:34:49.666110617 +0000 UTC m=+619.513717038" lastFinishedPulling="2025-11-28 15:34:53.319907428 +0000 UTC m=+623.167513849" observedRunningTime="2025-11-28 15:34:54.506260658 +0000 UTC m=+624.353867079" watchObservedRunningTime="2025-11-28 15:34:54.513932912 +0000 UTC m=+624.361539333"
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.524775 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-6dlfq" podStartSLOduration=2.760612084 podStartE2EDuration="6.52475913s" podCreationTimestamp="2025-11-28 15:34:48 +0000 UTC" firstStartedPulling="2025-11-28 15:34:49.611751591 +0000 UTC m=+619.459358012" lastFinishedPulling="2025-11-28 15:34:53.375898637 +0000 UTC m=+623.223505058" observedRunningTime="2025-11-28 15:34:54.52138736 +0000 UTC m=+624.368993781" watchObservedRunningTime="2025-11-28 15:34:54.52475913 +0000 UTC m=+624.372365551"
Nov 28 15:34:54 crc kubenswrapper[4647]: I1128 15:34:54.542519 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-s96fs" podStartSLOduration=2.7989728339999997 podStartE2EDuration="6.542500852s" podCreationTimestamp="2025-11-28 15:34:48 +0000 UTC" firstStartedPulling="2025-11-28 15:34:49.575022424 +0000 UTC m=+619.422628835" lastFinishedPulling="2025-11-28 15:34:53.318550432 +0000 UTC m=+623.166156853" observedRunningTime="2025-11-28 15:34:54.538997358 +0000 UTC m=+624.386603779" watchObservedRunningTime="2025-11-28 15:34:54.542500852 +0000 UTC m=+624.390107273"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.311373 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-7qjfk"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.584753 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-c76pb"]
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585245 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-controller" containerID="cri-o://c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585341 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="northd" containerID="cri-o://3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585366 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-node" containerID="cri-o://ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585599 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585584 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="sbdb" containerID="cri-o://12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585383 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-acl-logging" containerID="cri-o://4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.585728 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="nbdb" containerID="cri-o://b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.641107 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller" containerID="cri-o://a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef" gracePeriod=30
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.935349 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/3.log"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.938142 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovn-acl-logging/0.log"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.939856 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovn-controller/0.log"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.940361 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993658 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qcz2j"]
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993894 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993913 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993923 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993930 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993941 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="northd"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993948 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="northd"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993957 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="sbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993964 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="sbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993975 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.993982 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.993995 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="nbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994002 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="nbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994010 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-node"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994017 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-node"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994024 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994031 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994046 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994053 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994065 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-acl-logging"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994072 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-acl-logging"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994082 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kubecfg-setup"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994089 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kubecfg-setup"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994184 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994194 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994204 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="northd"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994216 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="sbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994225 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994234 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994240 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="nbdb"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994249 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994257 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="kube-rbac-proxy-node"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994265 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovn-acl-logging"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994364 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994374 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: E1128 15:34:59.994390 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994398 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994591 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.994603 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerName="ovnkube-controller"
Nov 28 15:34:59 crc kubenswrapper[4647]: I1128 15:34:59.996396 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048115 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048168 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048207 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048245 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048240 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log" (OuterVolumeSpecName: "node-log") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048269 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048316 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048356 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048365 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash" (OuterVolumeSpecName: "host-slash") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048359 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048459 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048503 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048358 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048391 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket" (OuterVolumeSpecName: "log-socket") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048583 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048560 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048595 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048691 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048733 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048769 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whrxj\" (UniqueName: \"kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048770 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048797 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048821 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048841 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048864 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048883 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048893 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048906 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048916 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048932 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048937 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048961 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd\") pod \"de25f5ba-91da-4a77-8747-ec3a56a141df\" (UID: \"de25f5ba-91da-4a77-8747-ec3a56a141df\") "
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.048979 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049022 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049101 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-slash\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049131 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-bin\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049162 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049183 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovn-node-metrics-cert\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049211 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-kubelet\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049191 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049325 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-ovn\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049355 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-script-lib\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049450 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049472 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-netd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049620 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-config\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049645 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thzcw\" (UniqueName: \"kubernetes.io/projected/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-kube-api-access-thzcw\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049665 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-var-lib-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049803 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-log-socket\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049827 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-node-log\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049875 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-etc-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049894 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-netns\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049914 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-systemd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049964 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-systemd-units\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049985 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-env-overrides\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050009 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050071 4647 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050084 4647 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050094 4647 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050104 4647 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050113 4647 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.049927 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050091 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050122 4647 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-node-log\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050157 4647 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050171 4647 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050184 4647 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-slash\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050196 4647 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050208 4647 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-log-socket\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050220 4647 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050231 4647 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050242 4647 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.050254 4647 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.053538 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj" (OuterVolumeSpecName: "kube-api-access-whrxj") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "kube-api-access-whrxj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.054215 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.070252 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "de25f5ba-91da-4a77-8747-ec3a56a141df" (UID: "de25f5ba-91da-4a77-8747-ec3a56a141df"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151385 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-netd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151473 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-config\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151499 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thzcw\" (UniqueName: \"kubernetes.io/projected/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-kube-api-access-thzcw\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151507 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-netd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151565 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-var-lib-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151584 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-log-socket\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151635 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-var-lib-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151662 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-log-socket\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151794 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-node-log\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151602 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-node-log\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151918 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-etc-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151935 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-netns\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151952 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-systemd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152015 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-netns\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152032 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-systemd-units\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152046 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-env-overrides\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152053 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-systemd-units\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152065 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152080 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-slash\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152096 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-bin\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152116 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovn-node-metrics-cert\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152130 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152149 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-kubelet\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152170 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-ovn\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152184 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-script-lib\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152206 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152239 4647 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/de25f5ba-91da-4a77-8747-ec3a56a141df-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152250 4647 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152259 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whrxj\" (UniqueName: \"kubernetes.io/projected/de25f5ba-91da-4a77-8747-ec3a56a141df-kube-api-access-whrxj\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152262 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-config\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152268 4647 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/de25f5ba-91da-4a77-8747-ec3a56a141df-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152299 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-run-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152284 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.151988 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-etc-openvswitch\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152304 4647 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/de25f5ba-91da-4a77-8747-ec3a56a141df-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152336 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-slash\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152348 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152357 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-kubelet\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152374 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-host-cni-bin\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152380 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-ovn\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152568 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-env-overrides\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152003 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-run-systemd\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.152859 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovnkube-script-lib\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.155713 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-ovn-node-metrics-cert\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.167579 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thzcw\" (UniqueName: \"kubernetes.io/projected/5f2a3e6e-09a4-4b7e-9587-79d0412c2a92-kube-api-access-thzcw\") pod \"ovnkube-node-qcz2j\" (UID: \"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92\") " pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.310031 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j"
Nov 28 15:35:00 crc kubenswrapper[4647]: W1128 15:35:00.326145 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f2a3e6e_09a4_4b7e_9587_79d0412c2a92.slice/crio-52ebaeeb169791194ca9e5989fcc625db766e88775adbf630333b4798e672325 WatchSource:0}: Error finding container 52ebaeeb169791194ca9e5989fcc625db766e88775adbf630333b4798e672325: Status 404 returned error can't find the container with id 52ebaeeb169791194ca9e5989fcc625db766e88775adbf630333b4798e672325
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.529658 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/2.log"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.530203 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/1.log"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.530254 4647 generic.go:334] "Generic (PLEG): container finished" podID="8fe12df9-7deb-4f76-91cf-5b6b138d7675" containerID="8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae" exitCode=2
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.530301 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerDied","Data":"8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.530387 4647 scope.go:117] "RemoveContainer" containerID="8e3334eac1ba45a4fcf135a0dd86a0fc10a9a737bbeb7f7c6676d20a0f601fda"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.530800 4647 scope.go:117] "RemoveContainer" containerID="8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae"
Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.531010 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-4mdqn_openshift-multus(8fe12df9-7deb-4f76-91cf-5b6b138d7675)\"" pod="openshift-multus/multus-4mdqn" podUID="8fe12df9-7deb-4f76-91cf-5b6b138d7675"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.533106 4647 generic.go:334] "Generic (PLEG): container finished" podID="5f2a3e6e-09a4-4b7e-9587-79d0412c2a92" containerID="352c2b1a6ed8bd9661a12c179c1a75b8b4477ac56b650e3d9d02dd5509295802" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.533337 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerDied","Data":"352c2b1a6ed8bd9661a12c179c1a75b8b4477ac56b650e3d9d02dd5509295802"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.533464 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"52ebaeeb169791194ca9e5989fcc625db766e88775adbf630333b4798e672325"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.538193 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovnkube-controller/3.log"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.541788 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovn-acl-logging/0.log"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542253 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-c76pb_de25f5ba-91da-4a77-8747-ec3a56a141df/ovn-controller/0.log"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542617 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542644 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542652 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542664 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542674 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542682 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" exitCode=0
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542691 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" exitCode=143
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542700 4647 generic.go:334] "Generic (PLEG): container finished" podID="de25f5ba-91da-4a77-8747-ec3a56a141df" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c" exitCode=143
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542722 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542749 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542763 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542775 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542787 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542799 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542813 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542825 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542833 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542839 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542846 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542852 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542859 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542865 4647
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542872 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542878 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.542887 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543081 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543205 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543242 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543250 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543370 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543383 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543390 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543396 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543404 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543530 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543542 4647 pod_container_deletor.go:114] "Failed to issue the 
request to remove container" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543553 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543566 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543575 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543581 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543705 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543714 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543721 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543727 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543734 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543741 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543748 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543874 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-c76pb" event={"ID":"de25f5ba-91da-4a77-8747-ec3a56a141df","Type":"ContainerDied","Data":"fc355af2b5023259bff2abf7c649dbb01de73ec932f8f0f10ca65ad1e7140ce4"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543891 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543907 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.543913 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544032 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544042 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544049 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544056 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544063 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.544069 4647 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"}
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.577638 4647 scope.go:117] "RemoveContainer" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.602363 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.630784 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-c76pb"]
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.635641 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-c76pb"]
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.637736 4647 scope.go:117] "RemoveContainer" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.650273 4647 scope.go:117] "RemoveContainer" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.663241 4647 scope.go:117] "RemoveContainer" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.677575 4647 scope.go:117] "RemoveContainer" containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"
containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.699835 4647 scope.go:117] "RemoveContainer" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.774880 4647 scope.go:117] "RemoveContainer" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.794599 4647 scope.go:117] "RemoveContainer" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.809804 4647 scope.go:117] "RemoveContainer" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.825759 4647 scope.go:117] "RemoveContainer" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.826355 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.826402 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} err="failed to get container status \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.826448 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.826849 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": container with ID starting with fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60 not found: ID does not exist" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.826874 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} err="failed to get container status \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": rpc error: code = NotFound desc = could not find container \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": container with ID starting with fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.826893 4647 scope.go:117] "RemoveContainer" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.827257 4647 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": container with ID starting with 12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f not found: ID does not exist" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.827278 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} err="failed to get container status \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": rpc error: code = NotFound desc = could not find container \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": container with ID starting with 12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.827293 4647 scope.go:117] "RemoveContainer" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.827667 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": container with ID starting with b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0 not found: ID does not exist" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.827687 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} err="failed to get container status \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": rpc error: code = NotFound desc = could not find container \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": container with ID starting with b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.827710 4647 scope.go:117] "RemoveContainer" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.828058 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": container with ID starting with 3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1 not found: ID does not exist" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828078 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} err="failed to get container status \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": rpc error: code = NotFound desc = could not find container \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": container with ID starting with 3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828095 4647 scope.go:117] "RemoveContainer" 
containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.828441 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": container with ID starting with 2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde not found: ID does not exist" containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828461 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} err="failed to get container status \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": rpc error: code = NotFound desc = could not find container \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": container with ID starting with 2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828476 4647 scope.go:117] "RemoveContainer" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.828801 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": container with ID starting with ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d not found: ID does not exist" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828820 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} err="failed to get container status \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": rpc error: code = NotFound desc = could not find container \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": container with ID starting with ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.828835 4647 scope.go:117] "RemoveContainer" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.829286 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": container with ID starting with 4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f not found: ID does not exist" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.829308 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} err="failed to get container status \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": rpc error: code = NotFound desc = could not find container \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": container with ID starting with 
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.829327 4647 scope.go:117] "RemoveContainer" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"
Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.829751 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": container with ID starting with c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c not found: ID does not exist" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.829773 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} err="failed to get container status \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": rpc error: code = NotFound desc = could not find container \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": container with ID starting with c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.829788 4647 scope.go:117] "RemoveContainer" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"
Nov 28 15:35:00 crc kubenswrapper[4647]: E1128 15:35:00.830043 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830064 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} err="failed to get container status \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830081 4647 scope.go:117] "RemoveContainer" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830316 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} err="failed to get container status \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830335 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830582 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} err="failed to get container status \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": rpc error: code = NotFound desc = could not find container \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": container with ID starting with fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60 not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830607 4647 scope.go:117] "RemoveContainer" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830817 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} err="failed to get container status \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": rpc error: code = NotFound desc = could not find container \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": container with ID starting with 12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.830842 4647 scope.go:117] "RemoveContainer" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.832547 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} err="failed to get container status \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": rpc error: code = NotFound desc = could not find container \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": container with ID starting with b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0 not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.832574 4647 scope.go:117] "RemoveContainer" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.834351 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} err="failed to get container status \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": rpc error: code = NotFound desc = could not find container \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": container with ID starting with 3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1 not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.834381 4647 scope.go:117] "RemoveContainer" containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.834684 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} err="failed to get container status \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": rpc error: code = NotFound desc = could not find container \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": container with ID starting with 2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.834714 4647 scope.go:117] "RemoveContainer" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.835161 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} err="failed to get container status \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": rpc error: code = NotFound desc = could not find container \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": container with ID starting with ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.835213 4647 scope.go:117] "RemoveContainer" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.835587 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} err="failed to get container status \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": rpc error: code = NotFound desc = could not find container \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": container with ID starting with 4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.835629 4647 scope.go:117] "RemoveContainer" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.836334 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} err="failed to get container status \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": rpc error: code = NotFound desc = could not find container \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": container with ID starting with c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.836363 4647 scope.go:117] "RemoveContainer" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.837677 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} err="failed to get container status \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.837730 4647 scope.go:117] "RemoveContainer" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838009 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} err="failed to get container status \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist"
containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} err="failed to get container status \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838034 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838353 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} err="failed to get container status \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": rpc error: code = NotFound desc = could not find container \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": container with ID starting with fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838378 4647 scope.go:117] "RemoveContainer" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838659 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} err="failed to get container status \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": rpc error: code = NotFound desc = could not find container \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": container with ID starting with 12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838683 4647 scope.go:117] "RemoveContainer" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838970 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} err="failed to get container status \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": rpc error: code = NotFound desc = could not find container \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": container with ID starting with b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.838999 4647 scope.go:117] "RemoveContainer" containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.839244 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} err="failed to get container status \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": rpc error: code = NotFound desc = could not find container \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": container with ID starting with 3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1 not found: ID does not exist" Nov 
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.839550 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} err="failed to get container status \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": rpc error: code = NotFound desc = could not find container \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": container with ID starting with 2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.839596 4647 scope.go:117] "RemoveContainer" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.839887 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} err="failed to get container status \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": rpc error: code = NotFound desc = could not find container \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": container with ID starting with ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.839911 4647 scope.go:117] "RemoveContainer" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.840586 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} err="failed to get container status \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": rpc error: code = NotFound desc = could not find container \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": container with ID starting with 4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.840613 4647 scope.go:117] "RemoveContainer" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.840902 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} err="failed to get container status \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": rpc error: code = NotFound desc = could not find container \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": container with ID starting with c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c not found: ID does not exist"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.840924 4647 scope.go:117] "RemoveContainer" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"
Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.841222 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} err="failed to get container status \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist"
\"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.841251 4647 scope.go:117] "RemoveContainer" containerID="a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.841555 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef"} err="failed to get container status \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": rpc error: code = NotFound desc = could not find container \"a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef\": container with ID starting with a71e6982966eaa4c58ac97dbe49d4ca5b54367fad75021f95327b1e747e9a8ef not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.841591 4647 scope.go:117] "RemoveContainer" containerID="fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.842056 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60"} err="failed to get container status \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": rpc error: code = NotFound desc = could not find container \"fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60\": container with ID starting with fee0dc20f018b44365756edda1ccfe22782b42b22d6b9eac18d5c7ed86836a60 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.842089 4647 scope.go:117] "RemoveContainer" containerID="12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.842355 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f"} err="failed to get container status \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": rpc error: code = NotFound desc = could not find container \"12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f\": container with ID starting with 12c2e39965d5bd26ef878b4f106f60046c27c0560101059f955c8056ff43fa2f not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.842389 4647 scope.go:117] "RemoveContainer" containerID="b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.843444 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0"} err="failed to get container status \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": rpc error: code = NotFound desc = could not find container \"b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0\": container with ID starting with b0973ab4305f67bf65b5fd5f0e7d777fc5057e74649b2346c6783a527d1856c0 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.843485 4647 scope.go:117] "RemoveContainer" 
containerID="3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.843772 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1"} err="failed to get container status \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": rpc error: code = NotFound desc = could not find container \"3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1\": container with ID starting with 3adae399edec25e2617c29f7a3ec47633d6258298fcc7c3dcb2da06e8b4513e1 not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.843789 4647 scope.go:117] "RemoveContainer" containerID="2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.844617 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde"} err="failed to get container status \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": rpc error: code = NotFound desc = could not find container \"2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde\": container with ID starting with 2f1fa8537f367660202195279c07e5cf86480e04d9a9896dd2f7bf940dbffbde not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.844637 4647 scope.go:117] "RemoveContainer" containerID="ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845010 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d"} err="failed to get container status \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": rpc error: code = NotFound desc = could not find container \"ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d\": container with ID starting with ff7408979a95444251ff45e071c2a37b8985c29572354bbfd0f03742c38c605d not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845046 4647 scope.go:117] "RemoveContainer" containerID="4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845399 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f"} err="failed to get container status \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": rpc error: code = NotFound desc = could not find container \"4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f\": container with ID starting with 4c29e2696ac7bf0402f2b215dcd0fd7ee6e323c2a554edb7f930f07b2dac634f not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845447 4647 scope.go:117] "RemoveContainer" containerID="c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845690 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c"} err="failed to get container status \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": rpc error: code = NotFound desc = could not find 
container \"c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c\": container with ID starting with c02651050363d4c74e1e7f5370d42f8f1d20d87bb563f3826bb37d1c44446f7c not found: ID does not exist" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845710 4647 scope.go:117] "RemoveContainer" containerID="13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848" Nov 28 15:35:00 crc kubenswrapper[4647]: I1128 15:35:00.845986 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848"} err="failed to get container status \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": rpc error: code = NotFound desc = could not find container \"13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848\": container with ID starting with 13cb8879629b241520da3d645bd495b6de4a3a22f894eb4a5803f9cb00e15848 not found: ID does not exist" Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.553770 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/2.log" Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559128 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"c180f078742f1a3b85cf558b5c09c8d1a3eb0c40cabddd09a2c0d365ee3d13b5"} Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559190 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"6eb269acaf2c5ce8498491c44c4d5c9fef402651cd85b527403842756b986cb7"} Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559210 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"3fa874272976047b04e9f002e19311897501eb574229a6d85fea4cae6e30989b"} Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559229 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"88fdd0440ac053391d2a5c14572398804785d2f08a83d6daeae10d0ef54a8424"} Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559248 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"4c10be4a9ab51814b2ec7b44f30f0ba0aedef70bf88249e6aa75ed6fade0b848"} Nov 28 15:35:01 crc kubenswrapper[4647]: I1128 15:35:01.559268 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"05a325334b6e1e4398fe618ae08123a01222502a5fcb78bab28f4a87c460f733"} Nov 28 15:35:02 crc kubenswrapper[4647]: I1128 15:35:02.404891 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de25f5ba-91da-4a77-8747-ec3a56a141df" path="/var/lib/kubelet/pods/de25f5ba-91da-4a77-8747-ec3a56a141df/volumes" Nov 28 15:35:03 crc kubenswrapper[4647]: I1128 15:35:03.578574 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" 
event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"3a0d073456e462bb363dd0541907e4ca6f6a9850d6213a0a3069a6cf4f9ace6b"} Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.612031 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" event={"ID":"5f2a3e6e-09a4-4b7e-9587-79d0412c2a92","Type":"ContainerStarted","Data":"c7add4664b06106592aa2cdfa60f8e1bdcc975f4e053613b015a11e3a535b15b"} Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.613702 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.614088 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.614126 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.644661 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.647017 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:06 crc kubenswrapper[4647]: I1128 15:35:06.665032 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" podStartSLOduration=7.665013395 podStartE2EDuration="7.665013395s" podCreationTimestamp="2025-11-28 15:34:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:35:06.657059894 +0000 UTC m=+636.504666325" watchObservedRunningTime="2025-11-28 15:35:06.665013395 +0000 UTC m=+636.512619816" Nov 28 15:35:13 crc kubenswrapper[4647]: I1128 15:35:13.397566 4647 scope.go:117] "RemoveContainer" containerID="8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae" Nov 28 15:35:13 crc kubenswrapper[4647]: E1128 15:35:13.398556 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-4mdqn_openshift-multus(8fe12df9-7deb-4f76-91cf-5b6b138d7675)\"" pod="openshift-multus/multus-4mdqn" podUID="8fe12df9-7deb-4f76-91cf-5b6b138d7675" Nov 28 15:35:28 crc kubenswrapper[4647]: I1128 15:35:28.396221 4647 scope.go:117] "RemoveContainer" containerID="8fd252daf65ad1190bad8c500d605b34feb9342c514b2cd718f506a2e6278aae" Nov 28 15:35:28 crc kubenswrapper[4647]: I1128 15:35:28.776146 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4mdqn_8fe12df9-7deb-4f76-91cf-5b6b138d7675/kube-multus/2.log" Nov 28 15:35:28 crc kubenswrapper[4647]: I1128 15:35:28.777464 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4mdqn" event={"ID":"8fe12df9-7deb-4f76-91cf-5b6b138d7675","Type":"ContainerStarted","Data":"77481a924a805210a9874df1397709a258224f60cbd28903df19d3920fa41de1"} Nov 28 15:35:30 crc kubenswrapper[4647]: I1128 15:35:30.352308 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qcz2j" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.200027 4647 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8"] Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.201686 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.206394 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.210466 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8"] Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.403075 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6gx5\" (UniqueName: \"kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.403242 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.403280 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.504873 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.504929 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.505002 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6gx5\" (UniqueName: \"kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " 
pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.505296 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.505385 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.532944 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6gx5\" (UniqueName: \"kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:39 crc kubenswrapper[4647]: I1128 15:35:39.818593 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:40 crc kubenswrapper[4647]: I1128 15:35:40.251198 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8"] Nov 28 15:35:40 crc kubenswrapper[4647]: I1128 15:35:40.851064 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" event={"ID":"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618","Type":"ContainerStarted","Data":"eb0fe3cd635528cd3c72233970033124be4683ffe7521d6f6f1dbe3a93e84829"} Nov 28 15:35:42 crc kubenswrapper[4647]: I1128 15:35:42.864952 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerID="e5d2f29b1bbbd4757f36fd1f34c5b464c28cdc4f3794f3a8c6f508e0a9f64a4b" exitCode=0 Nov 28 15:35:42 crc kubenswrapper[4647]: I1128 15:35:42.865790 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" event={"ID":"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618","Type":"ContainerDied","Data":"e5d2f29b1bbbd4757f36fd1f34c5b464c28cdc4f3794f3a8c6f508e0a9f64a4b"} Nov 28 15:35:44 crc kubenswrapper[4647]: I1128 15:35:44.884060 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerID="9d65258861ba679734e64e56e672dfa9f6a5422913f1e9de9dd7d93b5b5fe802" exitCode=0 Nov 28 15:35:44 crc kubenswrapper[4647]: I1128 15:35:44.884150 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" event={"ID":"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618","Type":"ContainerDied","Data":"9d65258861ba679734e64e56e672dfa9f6a5422913f1e9de9dd7d93b5b5fe802"} Nov 28 15:35:45 crc kubenswrapper[4647]: I1128 
15:35:45.895671 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerID="bdc25db0ea139445772f077786fef88702acb96f727a38425e2824937e2d5bac" exitCode=0 Nov 28 15:35:45 crc kubenswrapper[4647]: I1128 15:35:45.895729 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" event={"ID":"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618","Type":"ContainerDied","Data":"bdc25db0ea139445772f077786fef88702acb96f727a38425e2824937e2d5bac"} Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.158155 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.326070 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle\") pod \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.326171 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6gx5\" (UniqueName: \"kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5\") pod \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.326210 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util\") pod \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\" (UID: \"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618\") " Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.327389 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle" (OuterVolumeSpecName: "bundle") pod "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" (UID: "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.341453 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5" (OuterVolumeSpecName: "kube-api-access-g6gx5") pod "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" (UID: "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618"). InnerVolumeSpecName "kube-api-access-g6gx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.355675 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util" (OuterVolumeSpecName: "util") pod "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" (UID: "6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.429133 4647 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.429184 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6gx5\" (UniqueName: \"kubernetes.io/projected/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-kube-api-access-g6gx5\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.429202 4647 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.913801 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" event={"ID":"6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618","Type":"ContainerDied","Data":"eb0fe3cd635528cd3c72233970033124be4683ffe7521d6f6f1dbe3a93e84829"} Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.913855 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb0fe3cd635528cd3c72233970033124be4683ffe7521d6f6f1dbe3a93e84829" Nov 28 15:35:47 crc kubenswrapper[4647]: I1128 15:35:47.913882 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.787499 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn"] Nov 28 15:35:50 crc kubenswrapper[4647]: E1128 15:35:50.787904 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="extract" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.787916 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="extract" Nov 28 15:35:50 crc kubenswrapper[4647]: E1128 15:35:50.787926 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="pull" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.787932 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="pull" Nov 28 15:35:50 crc kubenswrapper[4647]: E1128 15:35:50.787951 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="util" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.787957 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="util" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.788037 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618" containerName="extract" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.788351 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.791517 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.792136 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.795442 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-bl9gn" Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.817708 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn"] Nov 28 15:35:50 crc kubenswrapper[4647]: I1128 15:35:50.974660 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2lck\" (UniqueName: \"kubernetes.io/projected/22b9216f-dccd-4cc4-ac15-770a5edc610b-kube-api-access-m2lck\") pod \"nmstate-operator-5b5b58f5c8-2k6zn\" (UID: \"22b9216f-dccd-4cc4-ac15-770a5edc610b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" Nov 28 15:35:51 crc kubenswrapper[4647]: I1128 15:35:51.075196 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2lck\" (UniqueName: \"kubernetes.io/projected/22b9216f-dccd-4cc4-ac15-770a5edc610b-kube-api-access-m2lck\") pod \"nmstate-operator-5b5b58f5c8-2k6zn\" (UID: \"22b9216f-dccd-4cc4-ac15-770a5edc610b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" Nov 28 15:35:51 crc kubenswrapper[4647]: I1128 15:35:51.107948 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2lck\" (UniqueName: \"kubernetes.io/projected/22b9216f-dccd-4cc4-ac15-770a5edc610b-kube-api-access-m2lck\") pod \"nmstate-operator-5b5b58f5c8-2k6zn\" (UID: \"22b9216f-dccd-4cc4-ac15-770a5edc610b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" Nov 28 15:35:51 crc kubenswrapper[4647]: I1128 15:35:51.402737 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" Nov 28 15:35:51 crc kubenswrapper[4647]: I1128 15:35:51.677133 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn"] Nov 28 15:35:51 crc kubenswrapper[4647]: I1128 15:35:51.936526 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" event={"ID":"22b9216f-dccd-4cc4-ac15-770a5edc610b","Type":"ContainerStarted","Data":"fbd489bb56735fbc7d40ba46e1db9919dbcd2831a87b4312780edfd2ed129530"} Nov 28 15:35:55 crc kubenswrapper[4647]: I1128 15:35:55.961039 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" event={"ID":"22b9216f-dccd-4cc4-ac15-770a5edc610b","Type":"ContainerStarted","Data":"9f3feb94b453db4b49499ffc5a13657b43b9f7ecbe68e79878b15df045cdf45b"} Nov 28 15:35:55 crc kubenswrapper[4647]: I1128 15:35:55.984457 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2k6zn" podStartSLOduration=1.996174247 podStartE2EDuration="5.984398918s" podCreationTimestamp="2025-11-28 15:35:50 +0000 UTC" firstStartedPulling="2025-11-28 15:35:51.686898444 +0000 UTC m=+681.534504865" lastFinishedPulling="2025-11-28 15:35:55.675123115 +0000 UTC m=+685.522729536" observedRunningTime="2025-11-28 15:35:55.979522848 +0000 UTC m=+685.827129359" watchObservedRunningTime="2025-11-28 15:35:55.984398918 +0000 UTC m=+685.832005379" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.584696 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.587597 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.593287 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2fvgk" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.598782 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.599719 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.601652 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.609383 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.609524 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zh6s\" (UniqueName: \"kubernetes.io/projected/8110d8bf-8ce5-415f-857c-6a89c9729b32-kube-api-access-9zh6s\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.609695 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nslm4\" (UniqueName: \"kubernetes.io/projected/6b13a440-59b0-440a-bcf5-164d5f29ceba-kube-api-access-nslm4\") pod \"nmstate-metrics-7f946cbc9-wxjgv\" (UID: \"6b13a440-59b0-440a-bcf5-164d5f29ceba\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.616462 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.624878 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.663144 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-2vmmg"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.663846 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710677 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-ovs-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710756 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-dbus-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710796 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710821 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zh6s\" (UniqueName: \"kubernetes.io/projected/8110d8bf-8ce5-415f-857c-6a89c9729b32-kube-api-access-9zh6s\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710878 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcqct\" (UniqueName: \"kubernetes.io/projected/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-kube-api-access-lcqct\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710914 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nslm4\" (UniqueName: \"kubernetes.io/projected/6b13a440-59b0-440a-bcf5-164d5f29ceba-kube-api-access-nslm4\") pod \"nmstate-metrics-7f946cbc9-wxjgv\" (UID: \"6b13a440-59b0-440a-bcf5-164d5f29ceba\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.710945 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-nmstate-lock\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: E1128 15:35:59.711129 4647 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 15:35:59 crc kubenswrapper[4647]: E1128 15:35:59.711186 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair podName:8110d8bf-8ce5-415f-857c-6a89c9729b32 nodeName:}" failed. No retries permitted until 2025-11-28 15:36:00.211164745 +0000 UTC m=+690.058771166 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-fs4lf" (UID: "8110d8bf-8ce5-415f-857c-6a89c9729b32") : secret "openshift-nmstate-webhook" not found Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.738710 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zh6s\" (UniqueName: \"kubernetes.io/projected/8110d8bf-8ce5-415f-857c-6a89c9729b32-kube-api-access-9zh6s\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.742581 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nslm4\" (UniqueName: \"kubernetes.io/projected/6b13a440-59b0-440a-bcf5-164d5f29ceba-kube-api-access-nslm4\") pod \"nmstate-metrics-7f946cbc9-wxjgv\" (UID: \"6b13a440-59b0-440a-bcf5-164d5f29ceba\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.810528 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.816293 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-ovs-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.816341 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-dbus-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.816402 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcqct\" (UniqueName: \"kubernetes.io/projected/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-kube-api-access-lcqct\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.816450 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-nmstate-lock\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.817247 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-ovs-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.817345 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-dbus-socket\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 
15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.817720 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-nmstate-lock\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.821902 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.830004 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.830267 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-4mjcx" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.830490 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.876017 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcqct\" (UniqueName: \"kubernetes.io/projected/e0476ba4-d83b-4a10-9898-fe3b6b05f76e-kube-api-access-lcqct\") pod \"nmstate-handler-2vmmg\" (UID: \"e0476ba4-d83b-4a10-9898-fe3b6b05f76e\") " pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.876330 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz"] Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.905329 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.937566 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.937987 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.938015 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6kww\" (UniqueName: \"kubernetes.io/projected/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-kube-api-access-l6kww\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:35:59 crc kubenswrapper[4647]: I1128 15:35:59.981809 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.009445 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6c6ccc498-bgbzq"] Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.010088 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.034830 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c6ccc498-bgbzq"] Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038792 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6kww\" (UniqueName: \"kubernetes.io/projected/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-kube-api-access-l6kww\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038851 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtsll\" (UniqueName: \"kubernetes.io/projected/26b3d5e0-1b1d-40bb-9256-244aa9947304-kube-api-access-jtsll\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038885 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038919 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-oauth-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038939 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-oauth-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038963 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.038982 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-service-ca\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 
15:36:00.039004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.039025 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.039045 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-trusted-ca-bundle\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.043233 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.046057 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.069539 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6kww\" (UniqueName: \"kubernetes.io/projected/247287a6-7c4f-4dae-ab2e-9e9d144fcdd4-kube-api-access-l6kww\") pod \"nmstate-console-plugin-7fbb5f6569-njhtz\" (UID: \"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140178 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140228 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-trusted-ca-bundle\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140262 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtsll\" (UniqueName: \"kubernetes.io/projected/26b3d5e0-1b1d-40bb-9256-244aa9947304-kube-api-access-jtsll\") pod \"console-6c6ccc498-bgbzq\" (UID: 
\"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140308 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-oauth-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140326 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-oauth-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140348 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.140367 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-service-ca\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.142630 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-trusted-ca-bundle\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.143147 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.143636 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-oauth-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.145654 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26b3d5e0-1b1d-40bb-9256-244aa9947304-service-ca\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.149725 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.150110 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-oauth-config\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.155207 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/26b3d5e0-1b1d-40bb-9256-244aa9947304-console-serving-cert\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.158939 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtsll\" (UniqueName: \"kubernetes.io/projected/26b3d5e0-1b1d-40bb-9256-244aa9947304-kube-api-access-jtsll\") pod \"console-6c6ccc498-bgbzq\" (UID: \"26b3d5e0-1b1d-40bb-9256-244aa9947304\") " pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.241652 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.244904 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8110d8bf-8ce5-415f-857c-6a89c9729b32-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-fs4lf\" (UID: \"8110d8bf-8ce5-415f-857c-6a89c9729b32\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.336245 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.404358 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz"] Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.422268 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv"] Nov 28 15:36:00 crc kubenswrapper[4647]: W1128 15:36:00.460229 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b13a440_59b0_440a_bcf5_164d5f29ceba.slice/crio-b90fab1be9fee2fee6158695ed81a6abc67fab9e007a4d922bf31f52a39595cf WatchSource:0}: Error finding container b90fab1be9fee2fee6158695ed81a6abc67fab9e007a4d922bf31f52a39595cf: Status 404 returned error can't find the container with id b90fab1be9fee2fee6158695ed81a6abc67fab9e007a4d922bf31f52a39595cf Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.516167 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.571866 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c6ccc498-bgbzq"] Nov 28 15:36:00 crc kubenswrapper[4647]: W1128 15:36:00.578475 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26b3d5e0_1b1d_40bb_9256_244aa9947304.slice/crio-a83c4eb22f7979e9f327cde87e623821cdf1d124d0483a4c8b7f4e1ed29bc888 WatchSource:0}: Error finding container a83c4eb22f7979e9f327cde87e623821cdf1d124d0483a4c8b7f4e1ed29bc888: Status 404 returned error can't find the container with id a83c4eb22f7979e9f327cde87e623821cdf1d124d0483a4c8b7f4e1ed29bc888 Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.717179 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf"] Nov 28 15:36:00 crc kubenswrapper[4647]: I1128 15:36:00.999806 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" event={"ID":"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4","Type":"ContainerStarted","Data":"66356c8e28db25194a2fc6c0fc3c82e563bac59caad73ba27de89f279138b560"} Nov 28 15:36:01 crc kubenswrapper[4647]: I1128 15:36:01.001787 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" event={"ID":"6b13a440-59b0-440a-bcf5-164d5f29ceba","Type":"ContainerStarted","Data":"b90fab1be9fee2fee6158695ed81a6abc67fab9e007a4d922bf31f52a39595cf"} Nov 28 15:36:01 crc kubenswrapper[4647]: I1128 15:36:01.003617 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-2vmmg" event={"ID":"e0476ba4-d83b-4a10-9898-fe3b6b05f76e","Type":"ContainerStarted","Data":"dfc2e3d163cf2805bee8234a9e6f82fb852bdc4774031b3a7c4485d15c83f1f2"} Nov 28 15:36:01 crc kubenswrapper[4647]: I1128 15:36:01.004861 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" event={"ID":"8110d8bf-8ce5-415f-857c-6a89c9729b32","Type":"ContainerStarted","Data":"528c9e5496fc1c25454a230133efeb52a2de60f04c979060aec709437fa9507a"} Nov 28 15:36:01 crc kubenswrapper[4647]: I1128 15:36:01.006070 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6ccc498-bgbzq" event={"ID":"26b3d5e0-1b1d-40bb-9256-244aa9947304","Type":"ContainerStarted","Data":"a83c4eb22f7979e9f327cde87e623821cdf1d124d0483a4c8b7f4e1ed29bc888"} Nov 28 15:36:02 crc kubenswrapper[4647]: I1128 15:36:02.015820 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c6ccc498-bgbzq" event={"ID":"26b3d5e0-1b1d-40bb-9256-244aa9947304","Type":"ContainerStarted","Data":"5e3cf36aeb1f495743fc711b0dd2146ce42d01e8079236f875e2580617ecedb4"} Nov 28 15:36:02 crc kubenswrapper[4647]: I1128 15:36:02.040479 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6c6ccc498-bgbzq" podStartSLOduration=3.04038594 podStartE2EDuration="3.04038594s" podCreationTimestamp="2025-11-28 15:35:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:36:02.035958082 +0000 UTC m=+691.883564493" watchObservedRunningTime="2025-11-28 15:36:02.04038594 +0000 UTC m=+691.887992361" Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.030472 4647 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" event={"ID":"8110d8bf-8ce5-415f-857c-6a89c9729b32","Type":"ContainerStarted","Data":"328515fd689869d76b7dfdf3c1884d3659ced193eaf9c8574d51300446020025"} Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.031169 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.034305 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" event={"ID":"247287a6-7c4f-4dae-ab2e-9e9d144fcdd4","Type":"ContainerStarted","Data":"a29106233d5fda91a8a1c4ff53b2f31785f2e49e606c8bdd7183e41700a7003f"} Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.036051 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" event={"ID":"6b13a440-59b0-440a-bcf5-164d5f29ceba","Type":"ContainerStarted","Data":"0c6d57b11fa01cd6b5b681327d507c0bc21b6ec642d26103c6b47a68c17fbb85"} Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.046580 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" podStartSLOduration=2.012872724 podStartE2EDuration="5.046564177s" podCreationTimestamp="2025-11-28 15:35:59 +0000 UTC" firstStartedPulling="2025-11-28 15:36:00.725380609 +0000 UTC m=+690.572987030" lastFinishedPulling="2025-11-28 15:36:03.759072062 +0000 UTC m=+693.606678483" observedRunningTime="2025-11-28 15:36:04.044386029 +0000 UTC m=+693.891992450" watchObservedRunningTime="2025-11-28 15:36:04.046564177 +0000 UTC m=+693.894170598" Nov 28 15:36:04 crc kubenswrapper[4647]: I1128 15:36:04.068694 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-njhtz" podStartSLOduration=1.7356191010000002 podStartE2EDuration="5.068666414s" podCreationTimestamp="2025-11-28 15:35:59 +0000 UTC" firstStartedPulling="2025-11-28 15:36:00.426025789 +0000 UTC m=+690.273632210" lastFinishedPulling="2025-11-28 15:36:03.759073102 +0000 UTC m=+693.606679523" observedRunningTime="2025-11-28 15:36:04.063800384 +0000 UTC m=+693.911406805" watchObservedRunningTime="2025-11-28 15:36:04.068666414 +0000 UTC m=+693.916272835" Nov 28 15:36:05 crc kubenswrapper[4647]: I1128 15:36:05.047936 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-2vmmg" event={"ID":"e0476ba4-d83b-4a10-9898-fe3b6b05f76e","Type":"ContainerStarted","Data":"9c246fdfde37c8e0481bb07f82635032d7defc405c005025c95c2e574b33f1ea"} Nov 28 15:36:05 crc kubenswrapper[4647]: I1128 15:36:05.048510 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:36:05 crc kubenswrapper[4647]: I1128 15:36:05.068619 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-2vmmg" podStartSLOduration=2.321764487 podStartE2EDuration="6.068597038s" podCreationTimestamp="2025-11-28 15:35:59 +0000 UTC" firstStartedPulling="2025-11-28 15:36:00.037661866 +0000 UTC m=+689.885268287" lastFinishedPulling="2025-11-28 15:36:03.784494417 +0000 UTC m=+693.632100838" observedRunningTime="2025-11-28 15:36:05.064743656 +0000 UTC m=+694.912350077" watchObservedRunningTime="2025-11-28 15:36:05.068597038 +0000 UTC m=+694.916203469" Nov 28 15:36:08 crc kubenswrapper[4647]: I1128 
15:36:08.081534 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" event={"ID":"6b13a440-59b0-440a-bcf5-164d5f29ceba","Type":"ContainerStarted","Data":"7df558faa99837c7292452ec9ce08dbaf4f87ac4093df6b02e22c84f12f7870c"} Nov 28 15:36:08 crc kubenswrapper[4647]: I1128 15:36:08.111356 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-wxjgv" podStartSLOduration=2.573863742 podStartE2EDuration="9.111332791s" podCreationTimestamp="2025-11-28 15:35:59 +0000 UTC" firstStartedPulling="2025-11-28 15:36:00.462867808 +0000 UTC m=+690.310474239" lastFinishedPulling="2025-11-28 15:36:07.000336857 +0000 UTC m=+696.847943288" observedRunningTime="2025-11-28 15:36:08.109205065 +0000 UTC m=+697.956811516" watchObservedRunningTime="2025-11-28 15:36:08.111332791 +0000 UTC m=+697.958939222" Nov 28 15:36:10 crc kubenswrapper[4647]: I1128 15:36:10.017332 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-2vmmg" Nov 28 15:36:10 crc kubenswrapper[4647]: I1128 15:36:10.337581 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:10 crc kubenswrapper[4647]: I1128 15:36:10.337646 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:10 crc kubenswrapper[4647]: I1128 15:36:10.348570 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:11 crc kubenswrapper[4647]: I1128 15:36:11.106955 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6c6ccc498-bgbzq" Nov 28 15:36:11 crc kubenswrapper[4647]: I1128 15:36:11.179071 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:36:17 crc kubenswrapper[4647]: I1128 15:36:17.023554 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:36:17 crc kubenswrapper[4647]: I1128 15:36:17.024025 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:36:20 crc kubenswrapper[4647]: I1128 15:36:20.526556 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-fs4lf" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.233369 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-z99bl" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" containerID="cri-o://240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553" gracePeriod=15 Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.279902 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k"] Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 
15:36:36.281261 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.283450 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.286062 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k"] Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.389403 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.389702 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.389773 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfz4x\" (UniqueName: \"kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.490656 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfz4x\" (UniqueName: \"kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.490723 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.490749 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.491600 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"util\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.493513 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.514852 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfz4x\" (UniqueName: \"kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.582158 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-z99bl_96144897-cea5-48a0-ad58-ccfa928aba03/console/0.log" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.582240 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.633085 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693046 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693110 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693195 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693234 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693310 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6hqg\" (UniqueName: 
\"kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693394 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.693982 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694045 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694225 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config\") pod \"96144897-cea5-48a0-ad58-ccfa928aba03\" (UID: \"96144897-cea5-48a0-ad58-ccfa928aba03\") " Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694605 4647 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694623 4647 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694689 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config" (OuterVolumeSpecName: "console-config") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.694937 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca" (OuterVolumeSpecName: "service-ca") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.697813 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.697899 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg" (OuterVolumeSpecName: "kube-api-access-r6hqg") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "kube-api-access-r6hqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.698014 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "96144897-cea5-48a0-ad58-ccfa928aba03" (UID: "96144897-cea5-48a0-ad58-ccfa928aba03"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.795672 4647 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.795704 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6hqg\" (UniqueName: \"kubernetes.io/projected/96144897-cea5-48a0-ad58-ccfa928aba03-kube-api-access-r6hqg\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.795718 4647 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.795730 4647 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/96144897-cea5-48a0-ad58-ccfa928aba03-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.795741 4647 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/96144897-cea5-48a0-ad58-ccfa928aba03-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:36 crc kubenswrapper[4647]: I1128 15:36:36.837210 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k"] Nov 28 15:36:36 crc kubenswrapper[4647]: W1128 15:36:36.844579 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf1a3745_7401_4dbd_be91_533022effe1a.slice/crio-57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e WatchSource:0}: Error finding container 57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e: Status 404 returned error can't find the container with id 57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e Nov 28 
15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.314580 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-z99bl_96144897-cea5-48a0-ad58-ccfa928aba03/console/0.log" Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.315032 4647 generic.go:334] "Generic (PLEG): container finished" podID="96144897-cea5-48a0-ad58-ccfa928aba03" containerID="240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553" exitCode=2 Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.315130 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z99bl" event={"ID":"96144897-cea5-48a0-ad58-ccfa928aba03","Type":"ContainerDied","Data":"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553"} Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.315156 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-z99bl" Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.315187 4647 scope.go:117] "RemoveContainer" containerID="240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553" Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.315170 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-z99bl" event={"ID":"96144897-cea5-48a0-ad58-ccfa928aba03","Type":"ContainerDied","Data":"9202dedae5f86040a0c28a873c50ce5c66b36fa4c7285a6cf99113d35c71377c"} Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.319820 4647 generic.go:334] "Generic (PLEG): container finished" podID="df1a3745-7401-4dbd-be91-533022effe1a" containerID="acbd3c473d2c0489d5aa6840464c4740cbc5223745fc6097325382c28363c4e6" exitCode=0 Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.319872 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" event={"ID":"df1a3745-7401-4dbd-be91-533022effe1a","Type":"ContainerDied","Data":"acbd3c473d2c0489d5aa6840464c4740cbc5223745fc6097325382c28363c4e6"} Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.319906 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" event={"ID":"df1a3745-7401-4dbd-be91-533022effe1a","Type":"ContainerStarted","Data":"57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e"} Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.357547 4647 scope.go:117] "RemoveContainer" containerID="240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553" Nov 28 15:36:37 crc kubenswrapper[4647]: E1128 15:36:37.361837 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553\": container with ID starting with 240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553 not found: ID does not exist" containerID="240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553" Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.361921 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553"} err="failed to get container status \"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553\": rpc error: code = NotFound desc = could not find container 
\"240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553\": container with ID starting with 240ec76f47a32c183de4927aa1bcc8ebffc6123ad97ccf2c3cbf091c09501553 not found: ID does not exist" Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.403737 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:36:37 crc kubenswrapper[4647]: I1128 15:36:37.410037 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-z99bl"] Nov 28 15:36:38 crc kubenswrapper[4647]: I1128 15:36:38.407683 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" path="/var/lib/kubelet/pods/96144897-cea5-48a0-ad58-ccfa928aba03/volumes" Nov 28 15:36:39 crc kubenswrapper[4647]: I1128 15:36:39.339300 4647 generic.go:334] "Generic (PLEG): container finished" podID="df1a3745-7401-4dbd-be91-533022effe1a" containerID="df0113704448694f04d557e0af6655bc1206f3acfa7597087d43537bf3c0b65c" exitCode=0 Nov 28 15:36:39 crc kubenswrapper[4647]: I1128 15:36:39.339353 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" event={"ID":"df1a3745-7401-4dbd-be91-533022effe1a","Type":"ContainerDied","Data":"df0113704448694f04d557e0af6655bc1206f3acfa7597087d43537bf3c0b65c"} Nov 28 15:36:40 crc kubenswrapper[4647]: I1128 15:36:40.355532 4647 generic.go:334] "Generic (PLEG): container finished" podID="df1a3745-7401-4dbd-be91-533022effe1a" containerID="850344b56d35ebe558ebc7eb0f1a2381c895ed93a9f6a62802aef20e9a55d18b" exitCode=0 Nov 28 15:36:40 crc kubenswrapper[4647]: I1128 15:36:40.355854 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" event={"ID":"df1a3745-7401-4dbd-be91-533022effe1a","Type":"ContainerDied","Data":"850344b56d35ebe558ebc7eb0f1a2381c895ed93a9f6a62802aef20e9a55d18b"} Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.682842 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.864686 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle\") pod \"df1a3745-7401-4dbd-be91-533022effe1a\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.864854 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfz4x\" (UniqueName: \"kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x\") pod \"df1a3745-7401-4dbd-be91-533022effe1a\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.864973 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util\") pod \"df1a3745-7401-4dbd-be91-533022effe1a\" (UID: \"df1a3745-7401-4dbd-be91-533022effe1a\") " Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.866517 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle" (OuterVolumeSpecName: "bundle") pod "df1a3745-7401-4dbd-be91-533022effe1a" (UID: "df1a3745-7401-4dbd-be91-533022effe1a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.875310 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x" (OuterVolumeSpecName: "kube-api-access-sfz4x") pod "df1a3745-7401-4dbd-be91-533022effe1a" (UID: "df1a3745-7401-4dbd-be91-533022effe1a"). InnerVolumeSpecName "kube-api-access-sfz4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.893286 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util" (OuterVolumeSpecName: "util") pod "df1a3745-7401-4dbd-be91-533022effe1a" (UID: "df1a3745-7401-4dbd-be91-533022effe1a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.966500 4647 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.966551 4647 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/df1a3745-7401-4dbd-be91-533022effe1a-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:41 crc kubenswrapper[4647]: I1128 15:36:41.966568 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfz4x\" (UniqueName: \"kubernetes.io/projected/df1a3745-7401-4dbd-be91-533022effe1a-kube-api-access-sfz4x\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:42 crc kubenswrapper[4647]: I1128 15:36:42.376855 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" Nov 28 15:36:42 crc kubenswrapper[4647]: I1128 15:36:42.376671 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k" event={"ID":"df1a3745-7401-4dbd-be91-533022effe1a","Type":"ContainerDied","Data":"57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e"} Nov 28 15:36:42 crc kubenswrapper[4647]: I1128 15:36:42.377764 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57b4f36016b5ed1a01e7d6a82027242be99df0d1642ea5713b3fef8a84dece7e" Nov 28 15:36:47 crc kubenswrapper[4647]: I1128 15:36:47.023291 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:36:47 crc kubenswrapper[4647]: I1128 15:36:47.024086 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.634145 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq"] Nov 28 15:36:50 crc kubenswrapper[4647]: E1128 15:36:50.634841 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="pull" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.634861 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="pull" Nov 28 15:36:50 crc kubenswrapper[4647]: E1128 15:36:50.634874 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="util" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.634884 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="util" Nov 28 15:36:50 crc kubenswrapper[4647]: E1128 15:36:50.634900 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="extract" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.634911 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="extract" Nov 28 15:36:50 crc kubenswrapper[4647]: E1128 15:36:50.634922 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.634932 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.635074 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="df1a3745-7401-4dbd-be91-533022effe1a" containerName="extract" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.635091 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="96144897-cea5-48a0-ad58-ccfa928aba03" containerName="console" Nov 28 15:36:50 crc 
kubenswrapper[4647]: I1128 15:36:50.635582 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.637878 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.638256 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.638630 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.638826 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.638885 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-tgj22" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.659656 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq"] Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.787730 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-apiservice-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.787808 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-webhook-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.788064 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d846\" (UniqueName: \"kubernetes.io/projected/660b76dc-783c-4df5-938b-7df9e2af467a-kube-api-access-4d846\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.876630 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2"] Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.877276 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.888238 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4z7mb" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.888640 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.888895 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.890156 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-apiservice-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.890203 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-webhook-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.890643 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d846\" (UniqueName: \"kubernetes.io/projected/660b76dc-783c-4df5-938b-7df9e2af467a-kube-api-access-4d846\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.918748 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2"] Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.925622 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-apiservice-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.933685 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/660b76dc-783c-4df5-938b-7df9e2af467a-webhook-cert\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.943383 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d846\" (UniqueName: \"kubernetes.io/projected/660b76dc-783c-4df5-938b-7df9e2af467a-kube-api-access-4d846\") pod \"metallb-operator-controller-manager-6b8f4b57d8-9nmkq\" (UID: \"660b76dc-783c-4df5-938b-7df9e2af467a\") " pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.950373 4647 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.992949 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgr7j\" (UniqueName: \"kubernetes.io/projected/a68e1898-f4f8-468a-98fc-03e4f01397e4-kube-api-access-dgr7j\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.993005 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-apiservice-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:50 crc kubenswrapper[4647]: I1128 15:36:50.993073 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-webhook-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.095629 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgr7j\" (UniqueName: \"kubernetes.io/projected/a68e1898-f4f8-468a-98fc-03e4f01397e4-kube-api-access-dgr7j\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.095999 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-apiservice-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.096150 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-webhook-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.104973 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-webhook-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.105520 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a68e1898-f4f8-468a-98fc-03e4f01397e4-apiservice-cert\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: 
\"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.128086 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgr7j\" (UniqueName: \"kubernetes.io/projected/a68e1898-f4f8-468a-98fc-03e4f01397e4-kube-api-access-dgr7j\") pod \"metallb-operator-webhook-server-6846cd54fc-hhsz2\" (UID: \"a68e1898-f4f8-468a-98fc-03e4f01397e4\") " pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.215793 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.238492 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq"] Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.428861 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" event={"ID":"660b76dc-783c-4df5-938b-7df9e2af467a","Type":"ContainerStarted","Data":"b71c7bab59d06e685180c20bf54ee73769a673454cbf72afc017033c9218d101"} Nov 28 15:36:51 crc kubenswrapper[4647]: I1128 15:36:51.535732 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2"] Nov 28 15:36:51 crc kubenswrapper[4647]: W1128 15:36:51.544668 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda68e1898_f4f8_468a_98fc_03e4f01397e4.slice/crio-011edaabd26f1525e89ea213659c9ddacdb39b28b2127674df285312bf9509a5 WatchSource:0}: Error finding container 011edaabd26f1525e89ea213659c9ddacdb39b28b2127674df285312bf9509a5: Status 404 returned error can't find the container with id 011edaabd26f1525e89ea213659c9ddacdb39b28b2127674df285312bf9509a5 Nov 28 15:36:52 crc kubenswrapper[4647]: I1128 15:36:52.435915 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" event={"ID":"a68e1898-f4f8-468a-98fc-03e4f01397e4","Type":"ContainerStarted","Data":"011edaabd26f1525e89ea213659c9ddacdb39b28b2127674df285312bf9509a5"} Nov 28 15:36:55 crc kubenswrapper[4647]: I1128 15:36:55.462630 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" event={"ID":"660b76dc-783c-4df5-938b-7df9e2af467a","Type":"ContainerStarted","Data":"56181fb77cd36d51e38f2d4f0b7281577d6eb6e39b7dd59ef0d2748586590c0b"} Nov 28 15:36:55 crc kubenswrapper[4647]: I1128 15:36:55.463109 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:36:59 crc kubenswrapper[4647]: I1128 15:36:59.491389 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" event={"ID":"a68e1898-f4f8-468a-98fc-03e4f01397e4","Type":"ContainerStarted","Data":"fe7bb43660243a0c4e462b62ff44de503465a3acbfeb00239b13a37e05e47744"} Nov 28 15:36:59 crc kubenswrapper[4647]: I1128 15:36:59.492211 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:36:59 crc kubenswrapper[4647]: I1128 15:36:59.525021 4647 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" podStartSLOduration=2.619021514 podStartE2EDuration="9.525000413s" podCreationTimestamp="2025-11-28 15:36:50 +0000 UTC" firstStartedPulling="2025-11-28 15:36:51.54715255 +0000 UTC m=+741.394758971" lastFinishedPulling="2025-11-28 15:36:58.453131449 +0000 UTC m=+748.300737870" observedRunningTime="2025-11-28 15:36:59.523138143 +0000 UTC m=+749.370744584" watchObservedRunningTime="2025-11-28 15:36:59.525000413 +0000 UTC m=+749.372606854" Nov 28 15:36:59 crc kubenswrapper[4647]: I1128 15:36:59.525928 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" podStartSLOduration=6.430451498 podStartE2EDuration="9.525922897s" podCreationTimestamp="2025-11-28 15:36:50 +0000 UTC" firstStartedPulling="2025-11-28 15:36:51.257958597 +0000 UTC m=+741.105565018" lastFinishedPulling="2025-11-28 15:36:54.353429996 +0000 UTC m=+744.201036417" observedRunningTime="2025-11-28 15:36:55.486238145 +0000 UTC m=+745.333844586" watchObservedRunningTime="2025-11-28 15:36:59.525922897 +0000 UTC m=+749.373529328" Nov 28 15:37:11 crc kubenswrapper[4647]: I1128 15:37:11.223023 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6846cd54fc-hhsz2" Nov 28 15:37:11 crc kubenswrapper[4647]: I1128 15:37:11.987778 4647 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.022769 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.024623 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.024838 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.025818 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.026072 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19" gracePeriod=600 Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.596886 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19" exitCode=0 Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.596958 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19"} Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.597288 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871"} Nov 28 15:37:17 crc kubenswrapper[4647]: I1128 15:37:17.597324 4647 scope.go:117] "RemoveContainer" containerID="660b8d53a13994c7bf7661f699cd18439dd7b6d29f57d450e797e9181d450a14" Nov 28 15:37:30 crc kubenswrapper[4647]: I1128 15:37:30.954664 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6b8f4b57d8-9nmkq" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.724856 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-vnbr8"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.728204 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.729819 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.730634 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.735044 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.750028 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.752949 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.754285 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-z477d" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.758083 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.834216 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-2csd4"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.835089 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.836923 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.838205 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jgx8g" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.838979 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.839945 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.867021 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-xqlc5"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.868130 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.874752 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875306 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-sockets\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875360 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbm28\" (UniqueName: \"kubernetes.io/projected/6850e82e-6e47-4f7e-a861-aa1e2f29b468-kube-api-access-vbm28\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875386 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875429 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875453 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875496 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n2ws\" (UniqueName: \"kubernetes.io/projected/dc709627-0843-4e8f-8485-5ac40ec5b457-kube-api-access-6n2ws\") pod \"frr-k8s-vnbr8\" (UID: 
\"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875513 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875542 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-reloader\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875560 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-startup\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875580 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp2bv\" (UniqueName: \"kubernetes.io/projected/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-kube-api-access-rp2bv\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875593 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-conf\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875658 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875692 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metallb-excludel2\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875715 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwf2k\" (UniqueName: \"kubernetes.io/projected/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-kube-api-access-cwf2k\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875747 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-cert\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " 
pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.875771 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.889729 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-xqlc5"] Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977063 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977096 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977121 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n2ws\" (UniqueName: \"kubernetes.io/projected/dc709627-0843-4e8f-8485-5ac40ec5b457-kube-api-access-6n2ws\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977141 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977161 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-reloader\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977178 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-startup\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977202 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp2bv\" (UniqueName: \"kubernetes.io/projected/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-kube-api-access-rp2bv\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977217 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-conf\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 
15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.977219 4647 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977247 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977268 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metallb-excludel2\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.977299 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs podName:8ee0a7ea-967a-457c-9d3b-1eb46c99b719 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:32.477277327 +0000 UTC m=+782.324883748 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs") pod "speaker-2csd4" (UID: "8ee0a7ea-967a-457c-9d3b-1eb46c99b719") : secret "speaker-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977325 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwf2k\" (UniqueName: \"kubernetes.io/projected/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-kube-api-access-cwf2k\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977357 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-cert\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977376 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977401 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-sockets\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977434 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbm28\" (UniqueName: \"kubernetes.io/projected/6850e82e-6e47-4f7e-a861-aa1e2f29b468-kube-api-access-vbm28\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977455 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977740 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-reloader\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977758 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.977908 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metallb-excludel2\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978145 4647 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978169 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert podName:6850e82e-6e47-4f7e-a861-aa1e2f29b468 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:32.47816204 +0000 UTC m=+782.325768461 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert") pod "frr-k8s-webhook-server-7fcb986d4-74jft" (UID: "6850e82e-6e47-4f7e-a861-aa1e2f29b468") : secret "frr-k8s-webhook-server-cert" not found Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.978359 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-sockets\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.978577 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-startup\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.978777 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/dc709627-0843-4e8f-8485-5ac40ec5b457-frr-conf\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978838 4647 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978860 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs podName:dc709627-0843-4e8f-8485-5ac40ec5b457 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:32.478852708 +0000 UTC m=+782.326459129 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs") pod "frr-k8s-vnbr8" (UID: "dc709627-0843-4e8f-8485-5ac40ec5b457") : secret "frr-k8s-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978859 4647 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978891 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs podName:c4214344-1c2e-48f0-a1cb-c0a0414c8e77 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:32.478882619 +0000 UTC m=+782.326489040 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs") pod "controller-f8648f98b-xqlc5" (UID: "c4214344-1c2e-48f0-a1cb-c0a0414c8e77") : secret "controller-certs-secret" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978907 4647 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 15:37:31 crc kubenswrapper[4647]: E1128 15:37:31.978929 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist podName:8ee0a7ea-967a-457c-9d3b-1eb46c99b719 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:32.47892327 +0000 UTC m=+782.326529691 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist") pod "speaker-2csd4" (UID: "8ee0a7ea-967a-457c-9d3b-1eb46c99b719") : secret "metallb-memberlist" not found Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.981292 4647 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.993767 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-cert\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:31 crc kubenswrapper[4647]: I1128 15:37:31.999080 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbm28\" (UniqueName: \"kubernetes.io/projected/6850e82e-6e47-4f7e-a861-aa1e2f29b468-kube-api-access-vbm28\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.002952 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n2ws\" (UniqueName: \"kubernetes.io/projected/dc709627-0843-4e8f-8485-5ac40ec5b457-kube-api-access-6n2ws\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.003096 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwf2k\" (UniqueName: \"kubernetes.io/projected/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-kube-api-access-cwf2k\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.024858 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp2bv\" (UniqueName: \"kubernetes.io/projected/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-kube-api-access-rp2bv\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.485157 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.485265 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.485329 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.485352 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.485385 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:32 crc kubenswrapper[4647]: E1128 15:37:32.485862 4647 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 15:37:32 crc kubenswrapper[4647]: E1128 15:37:32.486025 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist podName:8ee0a7ea-967a-457c-9d3b-1eb46c99b719 nodeName:}" failed. No retries permitted until 2025-11-28 15:37:33.485997981 +0000 UTC m=+783.333604402 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist") pod "speaker-2csd4" (UID: "8ee0a7ea-967a-457c-9d3b-1eb46c99b719") : secret "metallb-memberlist" not found Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.489675 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4214344-1c2e-48f0-a1cb-c0a0414c8e77-metrics-certs\") pod \"controller-f8648f98b-xqlc5\" (UID: \"c4214344-1c2e-48f0-a1cb-c0a0414c8e77\") " pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.490346 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6850e82e-6e47-4f7e-a861-aa1e2f29b468-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-74jft\" (UID: \"6850e82e-6e47-4f7e-a861-aa1e2f29b468\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.490559 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-metrics-certs\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.494717 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc709627-0843-4e8f-8485-5ac40ec5b457-metrics-certs\") pod \"frr-k8s-vnbr8\" (UID: \"dc709627-0843-4e8f-8485-5ac40ec5b457\") " pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.647280 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.653059 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.785537 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:32 crc kubenswrapper[4647]: I1128 15:37:32.930908 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft"] Nov 28 15:37:32 crc kubenswrapper[4647]: W1128 15:37:32.938917 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6850e82e_6e47_4f7e_a861_aa1e2f29b468.slice/crio-31f46001f800c9c5db0a11878d759a01fe10b4b27e5629bea2e70f2c3c6e2beb WatchSource:0}: Error finding container 31f46001f800c9c5db0a11878d759a01fe10b4b27e5629bea2e70f2c3c6e2beb: Status 404 returned error can't find the container with id 31f46001f800c9c5db0a11878d759a01fe10b4b27e5629bea2e70f2c3c6e2beb Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.009483 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-xqlc5"] Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.508316 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.528802 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8ee0a7ea-967a-457c-9d3b-1eb46c99b719-memberlist\") pod \"speaker-2csd4\" (UID: \"8ee0a7ea-967a-457c-9d3b-1eb46c99b719\") " pod="metallb-system/speaker-2csd4" Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.557231 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" event={"ID":"6850e82e-6e47-4f7e-a861-aa1e2f29b468","Type":"ContainerStarted","Data":"31f46001f800c9c5db0a11878d759a01fe10b4b27e5629bea2e70f2c3c6e2beb"} Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.560403 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xqlc5" event={"ID":"c4214344-1c2e-48f0-a1cb-c0a0414c8e77","Type":"ContainerStarted","Data":"c174f0c2304cce5b9e5a0b1d972f1cd6c7a9b8ea55f56a8489f5fac8e4b5c143"} Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.560480 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xqlc5" event={"ID":"c4214344-1c2e-48f0-a1cb-c0a0414c8e77","Type":"ContainerStarted","Data":"3c4bb3ea00c58b5e21b3300d2e2758c7a4e48f0a461ff49bfeb3ff48c27d76b4"} Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.560509 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.560522 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xqlc5" event={"ID":"c4214344-1c2e-48f0-a1cb-c0a0414c8e77","Type":"ContainerStarted","Data":"38f451cbaec6517d59cb43d9f6fd8fc9e94c0d8840f71563abfc37012018dc5a"} Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.561949 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"6a53940a7015fe8d4d4f071d1610e12bed3109221543c6b26c39ca1dbbe0698c"} Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.579025 4647 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="metallb-system/controller-f8648f98b-xqlc5" podStartSLOduration=2.578999501 podStartE2EDuration="2.578999501s" podCreationTimestamp="2025-11-28 15:37:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:37:33.57706349 +0000 UTC m=+783.424669911" watchObservedRunningTime="2025-11-28 15:37:33.578999501 +0000 UTC m=+783.426605942" Nov 28 15:37:33 crc kubenswrapper[4647]: I1128 15:37:33.647077 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2csd4" Nov 28 15:37:33 crc kubenswrapper[4647]: W1128 15:37:33.672724 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ee0a7ea_967a_457c_9d3b_1eb46c99b719.slice/crio-f728b0a675405cf073227133102b05d566948fc97501c7999c15916443aa6fb6 WatchSource:0}: Error finding container f728b0a675405cf073227133102b05d566948fc97501c7999c15916443aa6fb6: Status 404 returned error can't find the container with id f728b0a675405cf073227133102b05d566948fc97501c7999c15916443aa6fb6 Nov 28 15:37:34 crc kubenswrapper[4647]: I1128 15:37:34.572139 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2csd4" event={"ID":"8ee0a7ea-967a-457c-9d3b-1eb46c99b719","Type":"ContainerStarted","Data":"fcb9032468ce99b2630b927fa260219a98b950d5579698f294b6e83681a748f3"} Nov 28 15:37:34 crc kubenswrapper[4647]: I1128 15:37:34.572478 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2csd4" event={"ID":"8ee0a7ea-967a-457c-9d3b-1eb46c99b719","Type":"ContainerStarted","Data":"e98c3dcd9bb1f133bf67d21683ba7ebcf5ed3246aad1b4d852a170fde4cc1ddd"} Nov 28 15:37:34 crc kubenswrapper[4647]: I1128 15:37:34.572490 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2csd4" event={"ID":"8ee0a7ea-967a-457c-9d3b-1eb46c99b719","Type":"ContainerStarted","Data":"f728b0a675405cf073227133102b05d566948fc97501c7999c15916443aa6fb6"} Nov 28 15:37:34 crc kubenswrapper[4647]: I1128 15:37:34.572656 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-2csd4" Nov 28 15:37:34 crc kubenswrapper[4647]: I1128 15:37:34.623658 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-2csd4" podStartSLOduration=3.62364125 podStartE2EDuration="3.62364125s" podCreationTimestamp="2025-11-28 15:37:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:37:34.622019987 +0000 UTC m=+784.469626408" watchObservedRunningTime="2025-11-28 15:37:34.62364125 +0000 UTC m=+784.471247671" Nov 28 15:37:41 crc kubenswrapper[4647]: I1128 15:37:41.631430 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" event={"ID":"6850e82e-6e47-4f7e-a861-aa1e2f29b468","Type":"ContainerStarted","Data":"28a49e99a0bc36d33b12e277a49e924bc087c13e096662af69bc0f424b86a022"} Nov 28 15:37:41 crc kubenswrapper[4647]: I1128 15:37:41.632088 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:41 crc kubenswrapper[4647]: I1128 15:37:41.634748 4647 generic.go:334] "Generic (PLEG): container finished" podID="dc709627-0843-4e8f-8485-5ac40ec5b457" 
containerID="3b1b9196773784105df8bb8cfcd7f104a4991fc6f20ecff70189de2d78c846c1" exitCode=0 Nov 28 15:37:41 crc kubenswrapper[4647]: I1128 15:37:41.634789 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerDied","Data":"3b1b9196773784105df8bb8cfcd7f104a4991fc6f20ecff70189de2d78c846c1"} Nov 28 15:37:41 crc kubenswrapper[4647]: I1128 15:37:41.684014 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" podStartSLOduration=2.280811614 podStartE2EDuration="10.683998326s" podCreationTimestamp="2025-11-28 15:37:31 +0000 UTC" firstStartedPulling="2025-11-28 15:37:32.941332002 +0000 UTC m=+782.788938413" lastFinishedPulling="2025-11-28 15:37:41.344518654 +0000 UTC m=+791.192125125" observedRunningTime="2025-11-28 15:37:41.657671239 +0000 UTC m=+791.505277660" watchObservedRunningTime="2025-11-28 15:37:41.683998326 +0000 UTC m=+791.531604747" Nov 28 15:37:42 crc kubenswrapper[4647]: I1128 15:37:42.641082 4647 generic.go:334] "Generic (PLEG): container finished" podID="dc709627-0843-4e8f-8485-5ac40ec5b457" containerID="014d1898e8b911ed15fc8f54f398037e8ea6e5c03e0823304736c6648b8403c1" exitCode=0 Nov 28 15:37:42 crc kubenswrapper[4647]: I1128 15:37:42.641124 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerDied","Data":"014d1898e8b911ed15fc8f54f398037e8ea6e5c03e0823304736c6648b8403c1"} Nov 28 15:37:43 crc kubenswrapper[4647]: I1128 15:37:43.652102 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-2csd4" Nov 28 15:37:43 crc kubenswrapper[4647]: I1128 15:37:43.658958 4647 generic.go:334] "Generic (PLEG): container finished" podID="dc709627-0843-4e8f-8485-5ac40ec5b457" containerID="05c61cf52b5939813784782aedc227ba381b9b8a68cbdee6beb426edf922f0eb" exitCode=0 Nov 28 15:37:43 crc kubenswrapper[4647]: I1128 15:37:43.659020 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerDied","Data":"05c61cf52b5939813784782aedc227ba381b9b8a68cbdee6beb426edf922f0eb"} Nov 28 15:37:44 crc kubenswrapper[4647]: I1128 15:37:44.676235 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"e803b71f5153c31aa781409d0d110291066633429c14fb67ab5a5c11afb528ef"} Nov 28 15:37:44 crc kubenswrapper[4647]: I1128 15:37:44.677501 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"26c548687113195b471706ec66343155e971caf12e5cac0d46c9eadadb3bf39f"} Nov 28 15:37:44 crc kubenswrapper[4647]: I1128 15:37:44.677624 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"11bf5dab106d8720be01a0c8443343060edf7363bd6a0a2df26764c4d93a04e7"} Nov 28 15:37:44 crc kubenswrapper[4647]: I1128 15:37:44.677762 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"dcfbb47b5b62a628797928ae7e2e2bc1b05897bc669159e46de74f14eab8f327"} Nov 28 15:37:44 
crc kubenswrapper[4647]: I1128 15:37:44.677863 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"dc78d835a036d72e159e8d111b057816f389b0b3e1bbfc1da606d190d729d571"} Nov 28 15:37:45 crc kubenswrapper[4647]: I1128 15:37:45.693704 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vnbr8" event={"ID":"dc709627-0843-4e8f-8485-5ac40ec5b457","Type":"ContainerStarted","Data":"7b132b568477abf353b1cac1117ac9c04c4f34553feabcf1b5bbd72ec0053899"} Nov 28 15:37:45 crc kubenswrapper[4647]: I1128 15:37:45.694334 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.599227 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-vnbr8" podStartSLOduration=7.072703633 podStartE2EDuration="15.599198852s" podCreationTimestamp="2025-11-28 15:37:31 +0000 UTC" firstStartedPulling="2025-11-28 15:37:32.825875513 +0000 UTC m=+782.673481934" lastFinishedPulling="2025-11-28 15:37:41.352370722 +0000 UTC m=+791.199977153" observedRunningTime="2025-11-28 15:37:45.735113505 +0000 UTC m=+795.582719966" watchObservedRunningTime="2025-11-28 15:37:46.599198852 +0000 UTC m=+796.446805313" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.601013 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vtdpv"] Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.602177 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.606201 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-cbg7x" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.606311 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.611818 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vtdpv"] Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.614872 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.661640 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j68w5\" (UniqueName: \"kubernetes.io/projected/76ab5951-89d3-4ad1-8b6b-26982de63912-kube-api-access-j68w5\") pod \"openstack-operator-index-vtdpv\" (UID: \"76ab5951-89d3-4ad1-8b6b-26982de63912\") " pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.762156 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j68w5\" (UniqueName: \"kubernetes.io/projected/76ab5951-89d3-4ad1-8b6b-26982de63912-kube-api-access-j68w5\") pod \"openstack-operator-index-vtdpv\" (UID: \"76ab5951-89d3-4ad1-8b6b-26982de63912\") " pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.779447 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j68w5\" (UniqueName: 
\"kubernetes.io/projected/76ab5951-89d3-4ad1-8b6b-26982de63912-kube-api-access-j68w5\") pod \"openstack-operator-index-vtdpv\" (UID: \"76ab5951-89d3-4ad1-8b6b-26982de63912\") " pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:46 crc kubenswrapper[4647]: I1128 15:37:46.918642 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:47 crc kubenswrapper[4647]: I1128 15:37:47.467281 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vtdpv"] Nov 28 15:37:47 crc kubenswrapper[4647]: W1128 15:37:47.475529 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76ab5951_89d3_4ad1_8b6b_26982de63912.slice/crio-a600dd945a9b10457e7033960d7c09eded8162bfb307a2d8ceb083adf7f08189 WatchSource:0}: Error finding container a600dd945a9b10457e7033960d7c09eded8162bfb307a2d8ceb083adf7f08189: Status 404 returned error can't find the container with id a600dd945a9b10457e7033960d7c09eded8162bfb307a2d8ceb083adf7f08189 Nov 28 15:37:47 crc kubenswrapper[4647]: I1128 15:37:47.648187 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:47 crc kubenswrapper[4647]: I1128 15:37:47.690816 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:37:47 crc kubenswrapper[4647]: I1128 15:37:47.706252 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vtdpv" event={"ID":"76ab5951-89d3-4ad1-8b6b-26982de63912","Type":"ContainerStarted","Data":"a600dd945a9b10457e7033960d7c09eded8162bfb307a2d8ceb083adf7f08189"} Nov 28 15:37:51 crc kubenswrapper[4647]: I1128 15:37:51.741231 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vtdpv" event={"ID":"76ab5951-89d3-4ad1-8b6b-26982de63912","Type":"ContainerStarted","Data":"f880a3c89bf977635c034bfd8466e5b455b591078b87b4f9d0e1f80e971548d3"} Nov 28 15:37:51 crc kubenswrapper[4647]: I1128 15:37:51.761753 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vtdpv" podStartSLOduration=2.222270802 podStartE2EDuration="5.761734191s" podCreationTimestamp="2025-11-28 15:37:46 +0000 UTC" firstStartedPulling="2025-11-28 15:37:47.477737792 +0000 UTC m=+797.325344233" lastFinishedPulling="2025-11-28 15:37:51.017201211 +0000 UTC m=+800.864807622" observedRunningTime="2025-11-28 15:37:51.760318524 +0000 UTC m=+801.607924955" watchObservedRunningTime="2025-11-28 15:37:51.761734191 +0000 UTC m=+801.609340622" Nov 28 15:37:52 crc kubenswrapper[4647]: I1128 15:37:52.665559 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-74jft" Nov 28 15:37:52 crc kubenswrapper[4647]: I1128 15:37:52.790939 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-xqlc5" Nov 28 15:37:56 crc kubenswrapper[4647]: I1128 15:37:56.919151 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:56 crc kubenswrapper[4647]: I1128 15:37:56.920605 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 
15:37:56 crc kubenswrapper[4647]: I1128 15:37:56.963851 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:57 crc kubenswrapper[4647]: I1128 15:37:57.826244 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-vtdpv" Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.811570 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw"] Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.813052 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.815380 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-7xlrf" Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.833607 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw"] Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.949827 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqncw\" (UniqueName: \"kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.950047 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:58 crc kubenswrapper[4647]: I1128 15:37:58.950102 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.052079 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqncw\" (UniqueName: \"kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.052270 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " 
pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.052327 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.053336 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.054007 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.084597 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqncw\" (UniqueName: \"kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.153604 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.391722 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw"] Nov 28 15:37:59 crc kubenswrapper[4647]: I1128 15:37:59.802922 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" event={"ID":"efdd2a38-2a54-411c-8463-3adc8f3cd634","Type":"ContainerStarted","Data":"8304148e76a357507706b41d0be8613fec70c4a247722c21ca4f12cdec73b5e4"} Nov 28 15:38:00 crc kubenswrapper[4647]: I1128 15:38:00.815170 4647 generic.go:334] "Generic (PLEG): container finished" podID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerID="8791d5c8c2fec15ee78983c3ab5d0e01dac8d596f6937cf4bb896fc3e4ddb651" exitCode=0 Nov 28 15:38:00 crc kubenswrapper[4647]: I1128 15:38:00.815269 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" event={"ID":"efdd2a38-2a54-411c-8463-3adc8f3cd634","Type":"ContainerDied","Data":"8791d5c8c2fec15ee78983c3ab5d0e01dac8d596f6937cf4bb896fc3e4ddb651"} Nov 28 15:38:01 crc kubenswrapper[4647]: I1128 15:38:01.828748 4647 generic.go:334] "Generic (PLEG): container finished" podID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerID="fc2acef1efc30f0de3f9eb9ca50d2be6ec04f221598f39775fe32b87d5529373" exitCode=0 Nov 28 15:38:01 crc kubenswrapper[4647]: I1128 15:38:01.828849 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" event={"ID":"efdd2a38-2a54-411c-8463-3adc8f3cd634","Type":"ContainerDied","Data":"fc2acef1efc30f0de3f9eb9ca50d2be6ec04f221598f39775fe32b87d5529373"} Nov 28 15:38:02 crc kubenswrapper[4647]: I1128 15:38:02.651726 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-vnbr8" Nov 28 15:38:02 crc kubenswrapper[4647]: I1128 15:38:02.840568 4647 generic.go:334] "Generic (PLEG): container finished" podID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerID="ccc152b19eb985f8dfefc2cc88b4c9371984317a245fc2c5e7f28226b48c6ea2" exitCode=0 Nov 28 15:38:02 crc kubenswrapper[4647]: I1128 15:38:02.840663 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" event={"ID":"efdd2a38-2a54-411c-8463-3adc8f3cd634","Type":"ContainerDied","Data":"ccc152b19eb985f8dfefc2cc88b4c9371984317a245fc2c5e7f28226b48c6ea2"} Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.190739 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.340180 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle\") pod \"efdd2a38-2a54-411c-8463-3adc8f3cd634\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.340246 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util\") pod \"efdd2a38-2a54-411c-8463-3adc8f3cd634\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.340351 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqncw\" (UniqueName: \"kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw\") pod \"efdd2a38-2a54-411c-8463-3adc8f3cd634\" (UID: \"efdd2a38-2a54-411c-8463-3adc8f3cd634\") " Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.341370 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle" (OuterVolumeSpecName: "bundle") pod "efdd2a38-2a54-411c-8463-3adc8f3cd634" (UID: "efdd2a38-2a54-411c-8463-3adc8f3cd634"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.352722 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw" (OuterVolumeSpecName: "kube-api-access-zqncw") pod "efdd2a38-2a54-411c-8463-3adc8f3cd634" (UID: "efdd2a38-2a54-411c-8463-3adc8f3cd634"). InnerVolumeSpecName "kube-api-access-zqncw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.358694 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util" (OuterVolumeSpecName: "util") pod "efdd2a38-2a54-411c-8463-3adc8f3cd634" (UID: "efdd2a38-2a54-411c-8463-3adc8f3cd634"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.442318 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqncw\" (UniqueName: \"kubernetes.io/projected/efdd2a38-2a54-411c-8463-3adc8f3cd634-kube-api-access-zqncw\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.442381 4647 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.442394 4647 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/efdd2a38-2a54-411c-8463-3adc8f3cd634-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.860101 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" event={"ID":"efdd2a38-2a54-411c-8463-3adc8f3cd634","Type":"ContainerDied","Data":"8304148e76a357507706b41d0be8613fec70c4a247722c21ca4f12cdec73b5e4"} Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.860176 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8304148e76a357507706b41d0be8613fec70c4a247722c21ca4f12cdec73b5e4" Nov 28 15:38:04 crc kubenswrapper[4647]: I1128 15:38:04.860285 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.941168 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz"] Nov 28 15:38:07 crc kubenswrapper[4647]: E1128 15:38:07.941674 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="util" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.941689 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="util" Nov 28 15:38:07 crc kubenswrapper[4647]: E1128 15:38:07.941704 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="pull" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.941710 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="pull" Nov 28 15:38:07 crc kubenswrapper[4647]: E1128 15:38:07.941724 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="extract" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.941731 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="extract" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.941841 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="efdd2a38-2a54-411c-8463-3adc8f3cd634" containerName="extract" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.942443 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:07 crc kubenswrapper[4647]: I1128 15:38:07.945432 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-2x4mg" Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.090559 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz"] Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.110034 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts4jx\" (UniqueName: \"kubernetes.io/projected/c8f72013-5d98-4478-bdd9-180abb82af2c-kube-api-access-ts4jx\") pod \"openstack-operator-controller-operator-857c5c6d5d-pt9fz\" (UID: \"c8f72013-5d98-4478-bdd9-180abb82af2c\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.213129 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts4jx\" (UniqueName: \"kubernetes.io/projected/c8f72013-5d98-4478-bdd9-180abb82af2c-kube-api-access-ts4jx\") pod \"openstack-operator-controller-operator-857c5c6d5d-pt9fz\" (UID: \"c8f72013-5d98-4478-bdd9-180abb82af2c\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.247677 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts4jx\" (UniqueName: \"kubernetes.io/projected/c8f72013-5d98-4478-bdd9-180abb82af2c-kube-api-access-ts4jx\") pod \"openstack-operator-controller-operator-857c5c6d5d-pt9fz\" (UID: \"c8f72013-5d98-4478-bdd9-180abb82af2c\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.258870 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.723107 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz"] Nov 28 15:38:08 crc kubenswrapper[4647]: I1128 15:38:08.890222 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" event={"ID":"c8f72013-5d98-4478-bdd9-180abb82af2c","Type":"ContainerStarted","Data":"8ee32977bf2c2971ea77bfae094d31252059d4fcb1766592c1be01082d458c51"} Nov 28 15:38:13 crc kubenswrapper[4647]: I1128 15:38:13.925893 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" event={"ID":"c8f72013-5d98-4478-bdd9-180abb82af2c","Type":"ContainerStarted","Data":"93def478e366f026837ee3299657063ee4bf7e20bdb20ad8a30f4ce55c969f39"} Nov 28 15:38:15 crc kubenswrapper[4647]: I1128 15:38:15.942690 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" event={"ID":"c8f72013-5d98-4478-bdd9-180abb82af2c","Type":"ContainerStarted","Data":"6405a9c8e0ce9fa3f31306e597a52e0a1a66b3aea9ed7f5df3bf7d10eefdccd7"} Nov 28 15:38:15 crc kubenswrapper[4647]: I1128 15:38:15.943110 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:15 crc kubenswrapper[4647]: I1128 15:38:15.984081 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" podStartSLOduration=1.927895297 podStartE2EDuration="8.984052301s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.737024831 +0000 UTC m=+818.584631242" lastFinishedPulling="2025-11-28 15:38:15.793181825 +0000 UTC m=+825.640788246" observedRunningTime="2025-11-28 15:38:15.974095987 +0000 UTC m=+825.821702418" watchObservedRunningTime="2025-11-28 15:38:15.984052301 +0000 UTC m=+825.831658732" Nov 28 15:38:18 crc kubenswrapper[4647]: I1128 15:38:18.263201 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-pt9fz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.190401 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-s88h5"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.192197 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.194670 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.195942 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.196139 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-597p9" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.199311 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-df974" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.215799 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-s88h5"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.219911 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.220889 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.226803 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-sljkv" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.231015 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.242083 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.243430 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.259725 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-skx96" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.263811 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.264781 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.269333 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mjpxl" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.274774 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.280053 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.289625 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m9nf\" (UniqueName: \"kubernetes.io/projected/205cebfd-f183-486f-965f-ab494cae35dd-kube-api-access-5m9nf\") pod \"cinder-operator-controller-manager-748967c98-s88h5\" (UID: \"205cebfd-f183-486f-965f-ab494cae35dd\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.289707 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnxgb\" (UniqueName: \"kubernetes.io/projected/c27f5305-5c04-401d-b53e-ca2df0999cfd-kube-api-access-fnxgb\") pod \"barbican-operator-controller-manager-5bfbbb859d-qbdbz\" (UID: \"c27f5305-5c04-401d-b53e-ca2df0999cfd\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.289727 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd2fr\" (UniqueName: \"kubernetes.io/projected/022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619-kube-api-access-nd2fr\") pod \"designate-operator-controller-manager-6788cc6d75-687dh\" (UID: \"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.291129 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.313542 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.314707 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.320181 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-lpqcs" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.357776 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.361190 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.369388 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.369953 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qpddb" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.371875 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399249 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnxgb\" (UniqueName: \"kubernetes.io/projected/c27f5305-5c04-401d-b53e-ca2df0999cfd-kube-api-access-fnxgb\") pod \"barbican-operator-controller-manager-5bfbbb859d-qbdbz\" (UID: \"c27f5305-5c04-401d-b53e-ca2df0999cfd\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399328 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nd2fr\" (UniqueName: \"kubernetes.io/projected/022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619-kube-api-access-nd2fr\") pod \"designate-operator-controller-manager-6788cc6d75-687dh\" (UID: \"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399505 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5688l\" (UniqueName: \"kubernetes.io/projected/2698b76b-928c-4d48-bf4e-e03df478867a-kube-api-access-5688l\") pod \"heat-operator-controller-manager-698d6fd7d6-cpbpp\" (UID: \"2698b76b-928c-4d48-bf4e-e03df478867a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399752 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m9nf\" (UniqueName: \"kubernetes.io/projected/205cebfd-f183-486f-965f-ab494cae35dd-kube-api-access-5m9nf\") pod \"cinder-operator-controller-manager-748967c98-s88h5\" (UID: \"205cebfd-f183-486f-965f-ab494cae35dd\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399785 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzlcl\" (UniqueName: \"kubernetes.io/projected/62caff02-44e5-4ae9-8879-e588e2ec2c26-kube-api-access-kzlcl\") pod \"glance-operator-controller-manager-85fbd69fcd-kdwxt\" (UID: \"62caff02-44e5-4ae9-8879-e588e2ec2c26\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.399868 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4djzn\" (UniqueName: \"kubernetes.io/projected/43fb88ed-c57b-412e-a210-49ce2e7f8848-kube-api-access-4djzn\") pod \"horizon-operator-controller-manager-7d5d9fd47f-w2446\" (UID: \"43fb88ed-c57b-412e-a210-49ce2e7f8848\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:38:34 crc kubenswrapper[4647]: 
I1128 15:38:34.451982 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.458278 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m9nf\" (UniqueName: \"kubernetes.io/projected/205cebfd-f183-486f-965f-ab494cae35dd-kube-api-access-5m9nf\") pod \"cinder-operator-controller-manager-748967c98-s88h5\" (UID: \"205cebfd-f183-486f-965f-ab494cae35dd\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.482474 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-vl88c"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.486009 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd2fr\" (UniqueName: \"kubernetes.io/projected/022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619-kube-api-access-nd2fr\") pod \"designate-operator-controller-manager-6788cc6d75-687dh\" (UID: \"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.491261 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnxgb\" (UniqueName: \"kubernetes.io/projected/c27f5305-5c04-401d-b53e-ca2df0999cfd-kube-api-access-fnxgb\") pod \"barbican-operator-controller-manager-5bfbbb859d-qbdbz\" (UID: \"c27f5305-5c04-401d-b53e-ca2df0999cfd\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.492261 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.502361 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzlcl\" (UniqueName: \"kubernetes.io/projected/62caff02-44e5-4ae9-8879-e588e2ec2c26-kube-api-access-kzlcl\") pod \"glance-operator-controller-manager-85fbd69fcd-kdwxt\" (UID: \"62caff02-44e5-4ae9-8879-e588e2ec2c26\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.502458 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4djzn\" (UniqueName: \"kubernetes.io/projected/43fb88ed-c57b-412e-a210-49ce2e7f8848-kube-api-access-4djzn\") pod \"horizon-operator-controller-manager-7d5d9fd47f-w2446\" (UID: \"43fb88ed-c57b-412e-a210-49ce2e7f8848\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.502530 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.502615 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbwl7\" (UniqueName: \"kubernetes.io/projected/93230429-04c5-45a9-81c5-dab4213025d4-kube-api-access-vbwl7\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.502650 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5688l\" (UniqueName: \"kubernetes.io/projected/2698b76b-928c-4d48-bf4e-e03df478867a-kube-api-access-5688l\") pod \"heat-operator-controller-manager-698d6fd7d6-cpbpp\" (UID: \"2698b76b-928c-4d48-bf4e-e03df478867a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.510614 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.519040 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wcfls" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.523906 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.531542 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.541883 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.545803 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.547473 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-6cv5z" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.552567 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzlcl\" (UniqueName: \"kubernetes.io/projected/62caff02-44e5-4ae9-8879-e588e2ec2c26-kube-api-access-kzlcl\") pod \"glance-operator-controller-manager-85fbd69fcd-kdwxt\" (UID: \"62caff02-44e5-4ae9-8879-e588e2ec2c26\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.581996 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5688l\" (UniqueName: \"kubernetes.io/projected/2698b76b-928c-4d48-bf4e-e03df478867a-kube-api-access-5688l\") pod \"heat-operator-controller-manager-698d6fd7d6-cpbpp\" (UID: \"2698b76b-928c-4d48-bf4e-e03df478867a\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.583067 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.592558 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4djzn\" (UniqueName: \"kubernetes.io/projected/43fb88ed-c57b-412e-a210-49ce2e7f8848-kube-api-access-4djzn\") pod \"horizon-operator-controller-manager-7d5d9fd47f-w2446\" (UID: \"43fb88ed-c57b-412e-a210-49ce2e7f8848\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.592983 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-vl88c"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.599035 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.606600 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h2z5\" (UniqueName: \"kubernetes.io/projected/a4c112f9-f801-4aec-b715-72b336978342-kube-api-access-8h2z5\") pod \"keystone-operator-controller-manager-79cc9d59f5-x2ggd\" (UID: \"a4c112f9-f801-4aec-b715-72b336978342\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.606647 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5pn4\" (UniqueName: \"kubernetes.io/projected/6576f1dc-a847-446f-a228-d287036b2d56-kube-api-access-f5pn4\") pod \"ironic-operator-controller-manager-54485f899-vl88c\" (UID: \"6576f1dc-a847-446f-a228-d287036b2d56\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.606688 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.606721 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbwl7\" (UniqueName: \"kubernetes.io/projected/93230429-04c5-45a9-81c5-dab4213025d4-kube-api-access-vbwl7\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: E1128 15:38:34.607075 4647 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:34 crc kubenswrapper[4647]: E1128 15:38:34.607123 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert podName:93230429-04c5-45a9-81c5-dab4213025d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:35.107102774 +0000 UTC m=+844.954709195 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert") pod "infra-operator-controller-manager-6c55d8d69b-v9m4v" (UID: "93230429-04c5-45a9-81c5-dab4213025d4") : secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.607811 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.608014 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.619755 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-wvjbg" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.620356 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.639429 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.642001 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.644854 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.651595 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.656794 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.667809 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-sklrw" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.668344 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-bj4m6" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.675750 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.682613 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbwl7\" (UniqueName: \"kubernetes.io/projected/93230429-04c5-45a9-81c5-dab4213025d4-kube-api-access-vbwl7\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.700795 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.707663 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvprz\" (UniqueName: \"kubernetes.io/projected/d32856fc-f28a-4e36-9e9b-0d09486b8a09-kube-api-access-dvprz\") pod \"mariadb-operator-controller-manager-64d7c556cd-4tgc6\" (UID: \"d32856fc-f28a-4e36-9e9b-0d09486b8a09\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.707719 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg59b\" (UniqueName: \"kubernetes.io/projected/7760330a-6914-44a7-9fa5-aa6e6478506a-kube-api-access-qg59b\") pod \"neutron-operator-controller-manager-58879495c-d9tcm\" (UID: \"7760330a-6914-44a7-9fa5-aa6e6478506a\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.707782 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvqwq\" (UniqueName: \"kubernetes.io/projected/7ff4e4d2-ff33-484b-bc15-f0192f009688-kube-api-access-xvqwq\") pod \"manila-operator-controller-manager-5cbc8c7f96-jnnvb\" (UID: \"7ff4e4d2-ff33-484b-bc15-f0192f009688\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.707839 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h2z5\" (UniqueName: \"kubernetes.io/projected/a4c112f9-f801-4aec-b715-72b336978342-kube-api-access-8h2z5\") pod \"keystone-operator-controller-manager-79cc9d59f5-x2ggd\" (UID: \"a4c112f9-f801-4aec-b715-72b336978342\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.707872 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5pn4\" (UniqueName: \"kubernetes.io/projected/6576f1dc-a847-446f-a228-d287036b2d56-kube-api-access-f5pn4\") pod 
\"ironic-operator-controller-manager-54485f899-vl88c\" (UID: \"6576f1dc-a847-446f-a228-d287036b2d56\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.731789 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.733251 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.771226 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-ggxcl" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.781428 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h2z5\" (UniqueName: \"kubernetes.io/projected/a4c112f9-f801-4aec-b715-72b336978342-kube-api-access-8h2z5\") pod \"keystone-operator-controller-manager-79cc9d59f5-x2ggd\" (UID: \"a4c112f9-f801-4aec-b715-72b336978342\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.803382 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5pn4\" (UniqueName: \"kubernetes.io/projected/6576f1dc-a847-446f-a228-d287036b2d56-kube-api-access-f5pn4\") pod \"ironic-operator-controller-manager-54485f899-vl88c\" (UID: \"6576f1dc-a847-446f-a228-d287036b2d56\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.810524 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvqwq\" (UniqueName: \"kubernetes.io/projected/7ff4e4d2-ff33-484b-bc15-f0192f009688-kube-api-access-xvqwq\") pod \"manila-operator-controller-manager-5cbc8c7f96-jnnvb\" (UID: \"7ff4e4d2-ff33-484b-bc15-f0192f009688\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.810686 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvj4g\" (UniqueName: \"kubernetes.io/projected/f71ac6d8-b917-43a7-a35c-dce863f16280-kube-api-access-qvj4g\") pod \"nova-operator-controller-manager-79d658b66d-8szbq\" (UID: \"f71ac6d8-b917-43a7-a35c-dce863f16280\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.810787 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvprz\" (UniqueName: \"kubernetes.io/projected/d32856fc-f28a-4e36-9e9b-0d09486b8a09-kube-api-access-dvprz\") pod \"mariadb-operator-controller-manager-64d7c556cd-4tgc6\" (UID: \"d32856fc-f28a-4e36-9e9b-0d09486b8a09\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.810811 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg59b\" (UniqueName: \"kubernetes.io/projected/7760330a-6914-44a7-9fa5-aa6e6478506a-kube-api-access-qg59b\") pod \"neutron-operator-controller-manager-58879495c-d9tcm\" (UID: \"7760330a-6914-44a7-9fa5-aa6e6478506a\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 
28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.833055 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.857192 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.861905 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.863132 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.879325 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m"] Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.900321 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.912294 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvj4g\" (UniqueName: \"kubernetes.io/projected/f71ac6d8-b917-43a7-a35c-dce863f16280-kube-api-access-qvj4g\") pod \"nova-operator-controller-manager-79d658b66d-8szbq\" (UID: \"f71ac6d8-b917-43a7-a35c-dce863f16280\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.912336 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rgmj\" (UniqueName: \"kubernetes.io/projected/6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6-kube-api-access-6rgmj\") pod \"octavia-operator-controller-manager-d5fb87cb8-5992m\" (UID: \"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:38:34 crc kubenswrapper[4647]: I1128 15:38:34.923808 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.010220 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-tfjt8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.013843 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rgmj\" (UniqueName: \"kubernetes.io/projected/6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6-kube-api-access-6rgmj\") pod \"octavia-operator-controller-manager-d5fb87cb8-5992m\" (UID: \"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.014458 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.015511 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.018017 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvj4g\" (UniqueName: \"kubernetes.io/projected/f71ac6d8-b917-43a7-a35c-dce863f16280-kube-api-access-qvj4g\") pod \"nova-operator-controller-manager-79d658b66d-8szbq\" (UID: \"f71ac6d8-b917-43a7-a35c-dce863f16280\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.021809 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-qj6tf" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.021964 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.024284 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvprz\" (UniqueName: \"kubernetes.io/projected/d32856fc-f28a-4e36-9e9b-0d09486b8a09-kube-api-access-dvprz\") pod \"mariadb-operator-controller-manager-64d7c556cd-4tgc6\" (UID: \"d32856fc-f28a-4e36-9e9b-0d09486b8a09\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.027573 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.029131 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.027589 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.032753 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-dvpjg" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.040802 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rgmj\" (UniqueName: \"kubernetes.io/projected/6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6-kube-api-access-6rgmj\") pod \"octavia-operator-controller-manager-d5fb87cb8-5992m\" (UID: \"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.073969 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.077056 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg59b\" (UniqueName: \"kubernetes.io/projected/7760330a-6914-44a7-9fa5-aa6e6478506a-kube-api-access-qg59b\") pod \"neutron-operator-controller-manager-58879495c-d9tcm\" (UID: \"7760330a-6914-44a7-9fa5-aa6e6478506a\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.079452 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvqwq\" (UniqueName: \"kubernetes.io/projected/7ff4e4d2-ff33-484b-bc15-f0192f009688-kube-api-access-xvqwq\") pod \"manila-operator-controller-manager-5cbc8c7f96-jnnvb\" (UID: \"7ff4e4d2-ff33-484b-bc15-f0192f009688\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.100980 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.112939 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.117004 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.117083 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.117105 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x47mb\" (UniqueName: \"kubernetes.io/projected/04af16a5-b153-433f-9c39-859c16167b0f-kube-api-access-x47mb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-2v9x8\" (UID: \"04af16a5-b153-433f-9c39-859c16167b0f\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.117132 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qct26\" (UniqueName: \"kubernetes.io/projected/873a1114-80f7-43f8-b6de-b69a7a152411-kube-api-access-qct26\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.117290 4647 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.117340 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert podName:93230429-04c5-45a9-81c5-dab4213025d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:36.117321318 +0000 UTC m=+845.964927739 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert") pod "infra-operator-controller-manager-6c55d8d69b-v9m4v" (UID: "93230429-04c5-45a9-81c5-dab4213025d4") : secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.201285 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.203718 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.219187 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.230396 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-j66sg" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.262987 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twfth\" (UniqueName: \"kubernetes.io/projected/c2af1d24-9d02-4f14-95b7-3875382cb095-kube-api-access-twfth\") pod \"placement-operator-controller-manager-867d87977b-pzkc9\" (UID: \"c2af1d24-9d02-4f14-95b7-3875382cb095\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.263091 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.263138 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x47mb\" (UniqueName: \"kubernetes.io/projected/04af16a5-b153-433f-9c39-859c16167b0f-kube-api-access-x47mb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-2v9x8\" (UID: \"04af16a5-b153-433f-9c39-859c16167b0f\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.263945 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qct26\" (UniqueName: \"kubernetes.io/projected/873a1114-80f7-43f8-b6de-b69a7a152411-kube-api-access-qct26\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.263993 4647 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.264103 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert podName:873a1114-80f7-43f8-b6de-b69a7a152411 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:35.764070725 +0000 UTC m=+845.611677146 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-zg7rq" (UID: "873a1114-80f7-43f8-b6de-b69a7a152411") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.294885 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x47mb\" (UniqueName: \"kubernetes.io/projected/04af16a5-b153-433f-9c39-859c16167b0f-kube-api-access-x47mb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-2v9x8\" (UID: \"04af16a5-b153-433f-9c39-859c16167b0f\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.298166 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.302099 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.312264 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.321774 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-nrwlx" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.329733 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.361049 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.401521 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h4j6\" (UniqueName: \"kubernetes.io/projected/4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f-kube-api-access-9h4j6\") pod \"swift-operator-controller-manager-8f6687c44-zqz4k\" (UID: \"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.401637 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twfth\" (UniqueName: \"kubernetes.io/projected/c2af1d24-9d02-4f14-95b7-3875382cb095-kube-api-access-twfth\") pod \"placement-operator-controller-manager-867d87977b-pzkc9\" (UID: \"c2af1d24-9d02-4f14-95b7-3875382cb095\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.405536 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qct26\" (UniqueName: \"kubernetes.io/projected/873a1114-80f7-43f8-b6de-b69a7a152411-kube-api-access-qct26\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.407734 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.419445 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.420528 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.423500 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-hf95m" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.431922 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twfth\" (UniqueName: \"kubernetes.io/projected/c2af1d24-9d02-4f14-95b7-3875382cb095-kube-api-access-twfth\") pod \"placement-operator-controller-manager-867d87977b-pzkc9\" (UID: \"c2af1d24-9d02-4f14-95b7-3875382cb095\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.442100 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.506535 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkq8n\" (UniqueName: \"kubernetes.io/projected/5f72c046-071d-4e1c-8e12-6574bed76f27-kube-api-access-qkq8n\") pod \"telemetry-operator-controller-manager-695797c565-rk4n7\" (UID: \"5f72c046-071d-4e1c-8e12-6574bed76f27\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.507028 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h4j6\" (UniqueName: \"kubernetes.io/projected/4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f-kube-api-access-9h4j6\") pod \"swift-operator-controller-manager-8f6687c44-zqz4k\" (UID: \"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.538149 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.572040 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h4j6\" (UniqueName: \"kubernetes.io/projected/4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f-kube-api-access-9h4j6\") pod \"swift-operator-controller-manager-8f6687c44-zqz4k\" (UID: \"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.575570 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-9dc64"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.577395 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.583007 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-gqvsm" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.586459 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.587908 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.590989 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-j7qx9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.596953 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.611263 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkq8n\" (UniqueName: \"kubernetes.io/projected/5f72c046-071d-4e1c-8e12-6574bed76f27-kube-api-access-qkq8n\") pod \"telemetry-operator-controller-manager-695797c565-rk4n7\" (UID: \"5f72c046-071d-4e1c-8e12-6574bed76f27\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.664692 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkq8n\" (UniqueName: \"kubernetes.io/projected/5f72c046-071d-4e1c-8e12-6574bed76f27-kube-api-access-qkq8n\") pod \"telemetry-operator-controller-manager-695797c565-rk4n7\" (UID: \"5f72c046-071d-4e1c-8e12-6574bed76f27\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.665028 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-9dc64"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.692743 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.697301 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.702538 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.713342 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfwjq\" (UniqueName: \"kubernetes.io/projected/93200d81-c9c3-4d5e-8406-112eef462119-kube-api-access-pfwjq\") pod \"watcher-operator-controller-manager-6b56b8849f-f4jfb\" (UID: \"93200d81-c9c3-4d5e-8406-112eef462119\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.713403 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qscxn\" (UniqueName: \"kubernetes.io/projected/577fb6a4-bb39-4df2-b161-04b2ac2f44d4-kube-api-access-qscxn\") pod \"test-operator-controller-manager-bb86466d8-9dc64\" (UID: \"577fb6a4-bb39-4df2-b161-04b2ac2f44d4\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.774175 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.775269 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.780338 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.784122 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-k694x" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.787660 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.793653 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.799207 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.801975 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.804894 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rqxqh" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.814663 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfwjq\" (UniqueName: \"kubernetes.io/projected/93200d81-c9c3-4d5e-8406-112eef462119-kube-api-access-pfwjq\") pod \"watcher-operator-controller-manager-6b56b8849f-f4jfb\" (UID: \"93200d81-c9c3-4d5e-8406-112eef462119\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.815444 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qscxn\" (UniqueName: \"kubernetes.io/projected/577fb6a4-bb39-4df2-b161-04b2ac2f44d4-kube-api-access-qscxn\") pod \"test-operator-controller-manager-bb86466d8-9dc64\" (UID: \"577fb6a4-bb39-4df2-b161-04b2ac2f44d4\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.815613 4647 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: E1128 15:38:35.815672 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert podName:873a1114-80f7-43f8-b6de-b69a7a152411 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:36.815639464 +0000 UTC m=+846.663245885 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-zg7rq" (UID: "873a1114-80f7-43f8-b6de-b69a7a152411") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.817232 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.821136 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7"] Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.847697 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfwjq\" (UniqueName: \"kubernetes.io/projected/93200d81-c9c3-4d5e-8406-112eef462119-kube-api-access-pfwjq\") pod \"watcher-operator-controller-manager-6b56b8849f-f4jfb\" (UID: \"93200d81-c9c3-4d5e-8406-112eef462119\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.859788 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qscxn\" (UniqueName: \"kubernetes.io/projected/577fb6a4-bb39-4df2-b161-04b2ac2f44d4-kube-api-access-qscxn\") pod \"test-operator-controller-manager-bb86466d8-9dc64\" (UID: \"577fb6a4-bb39-4df2-b161-04b2ac2f44d4\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.875131 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.919074 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k9bf\" (UniqueName: \"kubernetes.io/projected/a57f869c-1d71-4341-a632-870e7b3dfede-kube-api-access-8k9bf\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.919123 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqpql\" (UniqueName: \"kubernetes.io/projected/1a8f857d-6498-42ba-bbc5-2bb5b2896c6e-kube-api-access-kqpql\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-kvck7\" (UID: \"1a8f857d-6498-42ba-bbc5-2bb5b2896c6e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" Nov 28 15:38:35 crc kubenswrapper[4647]: I1128 15:38:35.919253 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.021182 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.021259 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k9bf\" (UniqueName: \"kubernetes.io/projected/a57f869c-1d71-4341-a632-870e7b3dfede-kube-api-access-8k9bf\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.021300 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqpql\" (UniqueName: \"kubernetes.io/projected/1a8f857d-6498-42ba-bbc5-2bb5b2896c6e-kube-api-access-kqpql\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-kvck7\" (UID: \"1a8f857d-6498-42ba-bbc5-2bb5b2896c6e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" Nov 28 15:38:36 crc kubenswrapper[4647]: E1128 15:38:36.021570 4647 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 15:38:36 crc kubenswrapper[4647]: E1128 15:38:36.021655 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert podName:a57f869c-1d71-4341-a632-870e7b3dfede nodeName:}" failed. No retries permitted until 2025-11-28 15:38:36.5216289 +0000 UTC m=+846.369235321 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert") pod "openstack-operator-controller-manager-69699fdd55-qvs9j" (UID: "a57f869c-1d71-4341-a632-870e7b3dfede") : secret "webhook-server-cert" not found
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.048932 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k9bf\" (UniqueName: \"kubernetes.io/projected/a57f869c-1d71-4341-a632-870e7b3dfede-kube-api-access-8k9bf\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.050869 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqpql\" (UniqueName: \"kubernetes.io/projected/1a8f857d-6498-42ba-bbc5-2bb5b2896c6e-kube-api-access-kqpql\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-kvck7\" (UID: \"1a8f857d-6498-42ba-bbc5-2bb5b2896c6e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.103947 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" event={"ID":"2698b76b-928c-4d48-bf4e-e03df478867a","Type":"ContainerStarted","Data":"b834201d98907b96efa725324fad2eb5f218705e36f8640fd1c8dfdea2b7b825"}
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.132576 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.137112 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93230429-04c5-45a9-81c5-dab4213025d4-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-v9m4v\" (UID: \"93230429-04c5-45a9-81c5-dab4213025d4\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.149142 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.207615 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.262347 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.422356 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.465770 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-s88h5"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.546124 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:36 crc kubenswrapper[4647]: E1128 15:38:36.546580 4647 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 15:38:36 crc kubenswrapper[4647]: E1128 15:38:36.547351 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert podName:a57f869c-1d71-4341-a632-870e7b3dfede nodeName:}" failed. No retries permitted until 2025-11-28 15:38:37.547317234 +0000 UTC m=+847.394923655 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert") pod "openstack-operator-controller-manager-69699fdd55-qvs9j" (UID: "a57f869c-1d71-4341-a632-870e7b3dfede") : secret "webhook-server-cert" not found
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.801063 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.831114 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.850885 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.869463 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.879484 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/873a1114-80f7-43f8-b6de-b69a7a152411-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-zg7rq\" (UID: \"873a1114-80f7-43f8-b6de-b69a7a152411\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.903275 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m"]
Nov 28 15:38:36 crc kubenswrapper[4647]: W1128 15:38:36.914430 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d6d3f2a_5aa3_4f5f_89c6_2d807c55e6d6.slice/crio-32726a9431d55ad7b5cab6011e24191cf1965556202a6468736ad31d7906674c WatchSource:0}: Error finding container 32726a9431d55ad7b5cab6011e24191cf1965556202a6468736ad31d7906674c: Status 404 returned error can't find the container with id 32726a9431d55ad7b5cab6011e24191cf1965556202a6468736ad31d7906674c
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.941064 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.964553 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-vl88c"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.970137 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.977134 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.987229 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb"]
Nov 28 15:38:36 crc kubenswrapper[4647]: I1128 15:38:36.991911 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq"]
Nov 28 15:38:37 crc kubenswrapper[4647]: W1128 15:38:37.063630 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf71ac6d8_b917_43a7_a35c_dce863f16280.slice/crio-1e5ca7c62ec096cb19cd79b84479c4ffb0910d445c00a529ade31e7c7e98de4c WatchSource:0}: Error finding container 1e5ca7c62ec096cb19cd79b84479c4ffb0910d445c00a529ade31e7c7e98de4c: Status 404 returned error can't find the container with id 1e5ca7c62ec096cb19cd79b84479c4ffb0910d445c00a529ade31e7c7e98de4c
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.092244 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.101207 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-9dc64"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.112955 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8"]
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.126600 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qscxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-bb86466d8-9dc64_openstack-operators(577fb6a4-bb39-4df2-b161-04b2ac2f44d4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: W1128 15:38:37.144059 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04af16a5_b153_433f_9c39_859c16167b0f.slice/crio-b9b4ad18a47f6191138e00b7e1c078da80ea902c07f91a1fc9c19b226bbe2573 WatchSource:0}: Error finding container b9b4ad18a47f6191138e00b7e1c078da80ea902c07f91a1fc9c19b226bbe2573: Status 404 returned error can't find the container with id b9b4ad18a47f6191138e00b7e1c078da80ea902c07f91a1fc9c19b226bbe2573
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.144052 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.155707 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb"]
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.161987 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x47mb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-5b67cfc8fb-2v9x8_openstack-operators(04af16a5-b153-433f-9c39-859c16167b0f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.162474 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" event={"ID":"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619","Type":"ContainerStarted","Data":"e6043596dba135f60690726ac4616fb8a83b638853bece2230eeec0dc35e3e6b"}
Nov 28 15:38:37 crc kubenswrapper[4647]: W1128 15:38:37.168192 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bc1b1a9_4da1_4778_9aee_ade9fbe4a01f.slice/crio-cbdebae6605efbb8764fac2bc3c446a7c5836c8f91c7f57833abec41b7c41d9c WatchSource:0}: Error finding container cbdebae6605efbb8764fac2bc3c446a7c5836c8f91c7f57833abec41b7c41d9c: Status 404 returned error can't find the container with id cbdebae6605efbb8764fac2bc3c446a7c5836c8f91c7f57833abec41b7c41d9c
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.170182 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" event={"ID":"205cebfd-f183-486f-965f-ab494cae35dd","Type":"ContainerStarted","Data":"ae9f3d2116d33f23e629e5bbc5b18e1a7f8492dea318689d3d4009b60b3bc4b8"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.172985 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.178029 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.180527 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" event={"ID":"d32856fc-f28a-4e36-9e9b-0d09486b8a09","Type":"ContainerStarted","Data":"beaa5e3f1bd12753699eca212c143a4943878c8787b66bc1a2e0dde2aa4dd94c"}
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.188677 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9h4j6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-8f6687c44-zqz4k_openstack-operators(4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.189558 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" event={"ID":"5f72c046-071d-4e1c-8e12-6574bed76f27","Type":"ContainerStarted","Data":"b0532219f7e21088efb20755cac91dd923bf676415777bf5c630a2c2430971e0"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.202448 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.214972 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7"]
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.223408 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" event={"ID":"a4c112f9-f801-4aec-b715-72b336978342","Type":"ContainerStarted","Data":"125c902dd1d32a7487849cf00e627555668f6f883b4f82c7f190e2285670d0ff"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.227001 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" event={"ID":"c27f5305-5c04-401d-b53e-ca2df0999cfd","Type":"ContainerStarted","Data":"366527def63aa986b4d03fe253effd7a4373991c201e2f5968210ebd9ae74c0c"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.231765 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" event={"ID":"7760330a-6914-44a7-9fa5-aa6e6478506a","Type":"ContainerStarted","Data":"2e52e96d65736cf55f0fd273fba663e221de7ee7f26681018424d8cab5c0d03a"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.233699 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" event={"ID":"7ff4e4d2-ff33-484b-bc15-f0192f009688","Type":"ContainerStarted","Data":"9a3d186da0890d3c2b2bcf6dcfd2274718112c4eaf0a0b870b22efd391df4053"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.234985 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" event={"ID":"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6","Type":"ContainerStarted","Data":"32726a9431d55ad7b5cab6011e24191cf1965556202a6468736ad31d7906674c"}
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.236538 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-twfth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-pzkc9_openstack-operators(c2af1d24-9d02-4f14-95b7-3875382cb095): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.236723 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pfwjq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b56b8849f-f4jfb_openstack-operators(93200d81-c9c3-4d5e-8406-112eef462119): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.236744 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vbwl7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6c55d8d69b-v9m4v_openstack-operators(93230429-04c5-45a9-81c5-dab4213025d4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.239644 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" event={"ID":"f71ac6d8-b917-43a7-a35c-dce863f16280","Type":"ContainerStarted","Data":"1e5ca7c62ec096cb19cd79b84479c4ffb0910d445c00a529ade31e7c7e98de4c"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.241263 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" event={"ID":"43fb88ed-c57b-412e-a210-49ce2e7f8848","Type":"ContainerStarted","Data":"f588ec437c8731f97a9ae0bff3c5c1fb46b91fad6bdf9184c6d073476b42fc90"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.252080 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" event={"ID":"6576f1dc-a847-446f-a228-d287036b2d56","Type":"ContainerStarted","Data":"4cdf0f0d8995bdd9f3dfc6bdff7f9d4c4e8170ed68ca00f9561ca2552c2d10ce"}
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.255828 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" event={"ID":"62caff02-44e5-4ae9-8879-e588e2ec2c26","Type":"ContainerStarted","Data":"8efc549a05ea62a9e5fb913c51c3eb6fa1c2d3141e2b2dff166da9a35e3c03fe"}
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.405894 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podUID="577fb6a4-bb39-4df2-b161-04b2ac2f44d4"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.507591 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" podUID="04af16a5-b153-433f-9c39-859c16167b0f"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.568988 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.580933 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a57f869c-1d71-4341-a632-870e7b3dfede-cert\") pod \"openstack-operator-controller-manager-69699fdd55-qvs9j\" (UID: \"a57f869c-1d71-4341-a632-870e7b3dfede\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.701262 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq"]
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.704312 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" podUID="4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f"
Nov 28 15:38:37 crc kubenswrapper[4647]: I1128 15:38:37.726027 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.765157 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podUID="93200d81-c9c3-4d5e-8406-112eef462119"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.824927 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podUID="c2af1d24-9d02-4f14-95b7-3875382cb095"
Nov 28 15:38:37 crc kubenswrapper[4647]: E1128 15:38:37.832220 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" podUID="93230429-04c5-45a9-81c5-dab4213025d4"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.376024 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" event={"ID":"577fb6a4-bb39-4df2-b161-04b2ac2f44d4","Type":"ContainerStarted","Data":"7c6c2e0f9ac354e9de5f5f74396eaa0ea372cb8621c8553ef6f209c444b20d47"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.376098 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" event={"ID":"577fb6a4-bb39-4df2-b161-04b2ac2f44d4","Type":"ContainerStarted","Data":"f5714b34fe507ef406ba565195c0b2e47ad8ca65385c6570d2e6d20ea31519ae"}
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.426574 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podUID="577fb6a4-bb39-4df2-b161-04b2ac2f44d4"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.457012 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" event={"ID":"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f","Type":"ContainerStarted","Data":"708feed7eb8380ba25d2274741a0dbd35009448f6eacad25a9b2aa80525de373"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.457068 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" event={"ID":"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f","Type":"ContainerStarted","Data":"cbdebae6605efbb8764fac2bc3c446a7c5836c8f91c7f57833abec41b7c41d9c"}
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.466081 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" podUID="4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.529100 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" event={"ID":"c2af1d24-9d02-4f14-95b7-3875382cb095","Type":"ContainerStarted","Data":"651ae34d7d8a707f14731881b9193413d92d2771a15ad75b1fad4d913f2c3e24"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.529640 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" event={"ID":"c2af1d24-9d02-4f14-95b7-3875382cb095","Type":"ContainerStarted","Data":"c8a0cc2837d6678c557e5b8e267fcf55923dad21be18f782785f0f8d949c452e"}
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.553399 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podUID="c2af1d24-9d02-4f14-95b7-3875382cb095"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.571952 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" event={"ID":"1a8f857d-6498-42ba-bbc5-2bb5b2896c6e","Type":"ContainerStarted","Data":"c34fcb548f9d4ba7e98298f5dac2871b87adfc749c200610da647fbed90164c1"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.601158 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" event={"ID":"873a1114-80f7-43f8-b6de-b69a7a152411","Type":"ContainerStarted","Data":"02ed06816b44ca6e150d10ba7ae00613b3779f45a20e0c6f74bac74d6ec4f621"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.624710 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" event={"ID":"93230429-04c5-45a9-81c5-dab4213025d4","Type":"ContainerStarted","Data":"ca27bf2031a6fb883898ac37de2e03a88a3efe9ae93677f10c971a22c093c884"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.624763 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" event={"ID":"93230429-04c5-45a9-81c5-dab4213025d4","Type":"ContainerStarted","Data":"ff1b111abe8187a1c93a1dce54c28eb29bda7366a10fb16490848c7aa7f3ff46"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.638353 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"]
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.642799 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" podUID="93230429-04c5-45a9-81c5-dab4213025d4"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.669286 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" event={"ID":"93200d81-c9c3-4d5e-8406-112eef462119","Type":"ContainerStarted","Data":"ee6e46f49bb314764203437e3c349dd2c398ff211cddb2d4990f20e154c069c2"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.669358 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" event={"ID":"93200d81-c9c3-4d5e-8406-112eef462119","Type":"ContainerStarted","Data":"17e424781cc4171159ecb93e37ccd851a4337bc2c9d655881bdb9ca13b15ec78"}
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.671739 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podUID="93200d81-c9c3-4d5e-8406-112eef462119"
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.679662 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" event={"ID":"04af16a5-b153-433f-9c39-859c16167b0f","Type":"ContainerStarted","Data":"871477f88eb5412864e7eb5a78b30c051e249466ad66310826ff20e163847192"}
Nov 28 15:38:38 crc kubenswrapper[4647]: I1128 15:38:38.679738 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" event={"ID":"04af16a5-b153-433f-9c39-859c16167b0f","Type":"ContainerStarted","Data":"b9b4ad18a47f6191138e00b7e1c078da80ea902c07f91a1fc9c19b226bbe2573"}
Nov 28 15:38:38 crc kubenswrapper[4647]: E1128 15:38:38.683347 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" podUID="04af16a5-b153-433f-9c39-859c16167b0f"
Nov 28 15:38:39 crc kubenswrapper[4647]: I1128 15:38:39.799600 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" event={"ID":"a57f869c-1d71-4341-a632-870e7b3dfede","Type":"ContainerStarted","Data":"20e564d774aa1b4f818add7059e859cac60cc6c133c07a542cd39415f75a7b01"}
Nov 28 15:38:39 crc kubenswrapper[4647]: I1128 15:38:39.800030 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" event={"ID":"a57f869c-1d71-4341-a632-870e7b3dfede","Type":"ContainerStarted","Data":"c3a7b20de57ea565cd912f75acf7cac78553d6a03ec6620a3d0bf5ed183037a5"}
Nov 28 15:38:39 crc kubenswrapper[4647]: I1128 15:38:39.800047 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" event={"ID":"a57f869c-1d71-4341-a632-870e7b3dfede","Type":"ContainerStarted","Data":"c7750d8518bb33970f8a476a60454621b2aff9053493cf6bb16679258f6de708"}
Nov 28 15:38:39 crc kubenswrapper[4647]: I1128 15:38:39.800065 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.814170 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podUID="93200d81-c9c3-4d5e-8406-112eef462119"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.814958 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podUID="577fb6a4-bb39-4df2-b161-04b2ac2f44d4"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.815116 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" podUID="04af16a5-b153-433f-9c39-859c16167b0f"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.815273 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" podUID="4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.815344 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" podUID="93230429-04c5-45a9-81c5-dab4213025d4"
Nov 28 15:38:39 crc kubenswrapper[4647]: E1128 15:38:39.815831 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podUID="c2af1d24-9d02-4f14-95b7-3875382cb095"
Nov 28 15:38:39 crc kubenswrapper[4647]: I1128 15:38:39.994614 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j" podStartSLOduration=4.9945970299999995 podStartE2EDuration="4.99459703s" podCreationTimestamp="2025-11-28 15:38:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:38:39.992104574 +0000 UTC m=+849.839710995" watchObservedRunningTime="2025-11-28 15:38:39.99459703 +0000 UTC m=+849.842203451"
Nov 28 15:38:47 crc kubenswrapper[4647]: I1128 15:38:47.736983 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-qvs9j"
Nov 28 15:38:50 crc kubenswrapper[4647]: E1128 15:38:50.919458 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c"
Nov 28 15:38:50 crc kubenswrapper[4647]: E1128 15:38:50.919891 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f5pn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-54485f899-vl88c_openstack-operators(6576f1dc-a847-446f-a228-d287036b2d56): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:54 crc kubenswrapper[4647]: E1128 15:38:54.131187 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f"
Nov 28 15:38:54 crc kubenswrapper[4647]: E1128 15:38:54.131652 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qkq8n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-695797c565-rk4n7_openstack-operators(5f72c046-071d-4e1c-8e12-6574bed76f27): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:58 crc kubenswrapper[4647]: E1128 15:38:58.536353 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:44c6dcec0d489a675c35e097d92729162bfc2a8cac62d7c8376943ef922e2651"
Nov 28 15:38:58 crc kubenswrapper[4647]: E1128 15:38:58.537374 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:44c6dcec0d489a675c35e097d92729162bfc2a8cac62d7c8376943ef922e2651,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5m9nf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-748967c98-s88h5_openstack-operators(205cebfd-f183-486f-965f-ab494cae35dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:59 crc kubenswrapper[4647]: E1128 15:38:59.692711 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756"
Nov 28 15:38:59 crc kubenswrapper[4647]: E1128 15:38:59.693175 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8h2z5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-79cc9d59f5-x2ggd_openstack-operators(a4c112f9-f801-4aec-b715-72b336978342): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:01 crc kubenswrapper[4647]: E1128 15:39:01.723092 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b"
Nov 28 15:39:01 crc kubenswrapper[4647]: E1128 15:39:01.725490 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAG
E_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qct26,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-77868f484-zg7rq_openstack-operators(873a1114-80f7-43f8-b6de-b69a7a152411): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:02 crc kubenswrapper[4647]: E1128 15:39:02.205824 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:2811f492f5663ec8660767dcb699060691c10dd809b1bb5f3a1f6b803946a653"
Nov 28 15:39:02 crc kubenswrapper[4647]: E1128 15:39:02.206078 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:2811f492f5663ec8660767dcb699060691c10dd809b1bb5f3a1f6b803946a653,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4djzn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-7d5d9fd47f-w2446_openstack-operators(43fb88ed-c57b-412e-a210-49ce2e7f8848): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:02 crc kubenswrapper[4647]: E1128 15:39:02.710621 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:c5394efcfaeddc4231f98f1ed5267b77a8687038064cfb4302bcd0c8d6587856"
Nov 28 15:39:02 crc kubenswrapper[4647]: E1128 15:39:02.710855 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:c5394efcfaeddc4231f98f1ed5267b77a8687038064cfb4302bcd0c8d6587856,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fnxgb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-5bfbbb859d-qbdbz_openstack-operators(c27f5305-5c04-401d-b53e-ca2df0999cfd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:04 crc kubenswrapper[4647]: E1128 15:39:04.755373 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:8aaaf8bb0a81358ee196af922d534c9b3f6bb47b27f4283087f7e0254638a671"
Nov 28 15:39:04 crc kubenswrapper[4647]: E1128 15:39:04.756397 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:8aaaf8bb0a81358ee196af922d534c9b3f6bb47b27f4283087f7e0254638a671,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nd2fr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-6788cc6d75-687dh_openstack-operators(022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:05 crc kubenswrapper[4647]: E1128 15:39:05.216895 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:5245e851b4476baecd4173eca3e8669ac09ec69d36ad1ebc3a0f867713cbc14b"
Nov 28 15:39:05 crc kubenswrapper[4647]: E1128 15:39:05.217085 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:5245e851b4476baecd4173eca3e8669ac09ec69d36ad1ebc3a0f867713cbc14b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6rgmj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-d5fb87cb8-5992m_openstack-operators(6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:05 crc kubenswrapper[4647]: E1128 15:39:05.907356 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d"
Nov 28 15:39:05 crc kubenswrapper[4647]: E1128 15:39:05.907616 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-twfth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-pzkc9_openstack-operators(c2af1d24-9d02-4f14-95b7-3875382cb095): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:05 crc kubenswrapper[4647]: E1128 15:39:05.908857 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podUID="c2af1d24-9d02-4f14-95b7-3875382cb095"
Nov 28 15:39:06 crc kubenswrapper[4647]: E1128 15:39:06.619548 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Nov 28 15:39:06 crc kubenswrapper[4647]: E1128 15:39:06.619779 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kqpql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-kvck7_openstack-operators(1a8f857d-6498-42ba-bbc5-2bb5b2896c6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:06 crc kubenswrapper[4647]: E1128 15:39:06.621027 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" podUID="1a8f857d-6498-42ba-bbc5-2bb5b2896c6e"
Nov 28 15:39:07 crc kubenswrapper[4647]: E1128 15:39:07.018628 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" podUID="1a8f857d-6498-42ba-bbc5-2bb5b2896c6e"
Nov 28 15:39:12 crc kubenswrapper[4647]: E1128 15:39:12.825914 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391"
Nov 28 15:39:12 crc kubenswrapper[4647]: E1128 15:39:12.826905 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qscxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-bb86466d8-9dc64_openstack-operators(577fb6a4-bb39-4df2-b161-04b2ac2f44d4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:12 crc kubenswrapper[4647]: E1128 15:39:12.828045 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podUID="577fb6a4-bb39-4df2-b161-04b2ac2f44d4"
Nov 28 15:39:13 crc kubenswrapper[4647]: E1128 15:39:13.420495 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8"
Nov 28 15:39:13 crc kubenswrapper[4647]: E1128 15:39:13.421076 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xvqwq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5cbc8c7f96-jnnvb_openstack-operators(7ff4e4d2-ff33-484b-bc15-f0192f009688): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:13 crc kubenswrapper[4647]: E1128 15:39:13.868578 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a"
Nov 28 15:39:13 crc kubenswrapper[4647]: E1128 15:39:13.868763 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pfwjq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b56b8849f-f4jfb_openstack-operators(93200d81-c9c3-4d5e-8406-112eef462119): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:39:13 crc kubenswrapper[4647]: E1128 15:39:13.871207 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podUID="93200d81-c9c3-4d5e-8406-112eef462119"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.358850 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" podUID="6576f1dc-a847-446f-a228-d287036b2d56"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.411712 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" podUID="6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.416729 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" podUID="873a1114-80f7-43f8-b6de-b69a7a152411"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.472174 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" podUID="c27f5305-5c04-401d-b53e-ca2df0999cfd"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.517425 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" podUID="43fb88ed-c57b-412e-a210-49ce2e7f8848"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.517468 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" podUID="022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.546144 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" podUID="a4c112f9-f801-4aec-b715-72b336978342"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.596569 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" podUID="205cebfd-f183-486f-965f-ab494cae35dd"
Nov 28 15:39:14 crc kubenswrapper[4647]: E1128 15:39:14.599789 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" podUID="5f72c046-071d-4e1c-8e12-6574bed76f27"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.105208 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" event={"ID":"a4c112f9-f801-4aec-b715-72b336978342","Type":"ContainerStarted","Data":"e51bf99506a821634f02483c693ebf774a391fb20a145352b25023653076956e"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.112389 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" event={"ID":"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619","Type":"ContainerStarted","Data":"3e5a189e031528d1c2660479217800bba77db67cf313859ce1a41489482d0fe0"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.121093 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" event={"ID":"6576f1dc-a847-446f-a228-d287036b2d56","Type":"ContainerStarted","Data":"e1e869a1e037f24c15f13e6f7efa2bf224edd66d7a5cb04a3ad865d58c084181"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.126131 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" event={"ID":"43fb88ed-c57b-412e-a210-49ce2e7f8848","Type":"ContainerStarted","Data":"2e695c0859b85b23e89838b7e0804b19b431277592594d0380bd82bd18dd5057"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.128589 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" event={"ID":"5f72c046-071d-4e1c-8e12-6574bed76f27","Type":"ContainerStarted","Data":"4e8bb4ed308b13e237579ab62d4a1ad1c91fd1c163ba15c254782e1d07960749"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.137560 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" event={"ID":"873a1114-80f7-43f8-b6de-b69a7a152411","Type":"ContainerStarted","Data":"6edb9f8223e48a04c7c3fe0b9fe9c8cec1098cd434740f5134e8180643886cd9"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.148657 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" event={"ID":"2698b76b-928c-4d48-bf4e-e03df478867a","Type":"ContainerStarted","Data":"59cb7592e311bdab6d9a4dcb37935688636090d77c3221a98a1268a695c43aeb"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.148718 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" event={"ID":"2698b76b-928c-4d48-bf4e-e03df478867a","Type":"ContainerStarted","Data":"3d849b07980b6710a50444a9b99842d89dde1620d957d4c3f59ec15c0420aef8"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.148838 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.158847 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" event={"ID":"4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f","Type":"ContainerStarted","Data":"8ac0f5403ecd8162f87051e25db4c8aea19a0cc4ed61fea72bb3e08731b45596"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.159134 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.164886 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" event={"ID":"f71ac6d8-b917-43a7-a35c-dce863f16280","Type":"ContainerStarted","Data":"b1dcd865618848b8d91b828466f0159899ece93a53819c8272edd239b8126ab0"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.173825 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" event={"ID":"205cebfd-f183-486f-965f-ab494cae35dd","Type":"ContainerStarted","Data":"0240ab4c5087c46012c9727579753818af77ff6bb054be8bf63b78110ef65ccc"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.184338 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" event={"ID":"93230429-04c5-45a9-81c5-dab4213025d4","Type":"ContainerStarted","Data":"662dae271b538b405f6e61e303d85d59378c1d9f0b0bd2d0bc6a49d14714d30d"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.185101 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.194493 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" event={"ID":"d32856fc-f28a-4e36-9e9b-0d09486b8a09","Type":"ContainerStarted","Data":"ed48973933d796264973229034c51e36e752066c76e8f12e2e9280ccab1e55ac"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.201690 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" event={"ID":"62caff02-44e5-4ae9-8879-e588e2ec2c26","Type":"ContainerStarted","Data":"a3b73e6e458e2a122f01502054f594469b56c1d15eab229b5a0d03ff54708c77"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.201746 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" 
event={"ID":"62caff02-44e5-4ae9-8879-e588e2ec2c26","Type":"ContainerStarted","Data":"800e00a4f4f87606b9f75f70180eaf129e9ab1c08370ade7c09e2610cdf30607"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.219042 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" event={"ID":"7760330a-6914-44a7-9fa5-aa6e6478506a","Type":"ContainerStarted","Data":"0375b804f9bc472424df581c6bba4831d82d9ca1fc1b83462cb274907133313e"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.230299 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" podStartSLOduration=9.506272433 podStartE2EDuration="41.230245555s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:35.935629802 +0000 UTC m=+845.783236223" lastFinishedPulling="2025-11-28 15:39:07.659602924 +0000 UTC m=+877.507209345" observedRunningTime="2025-11-28 15:39:15.219445519 +0000 UTC m=+885.067051940" watchObservedRunningTime="2025-11-28 15:39:15.230245555 +0000 UTC m=+885.077851976"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.233990 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" event={"ID":"7ff4e4d2-ff33-484b-bc15-f0192f009688","Type":"ContainerStarted","Data":"47db85f0bf4583063e17ce1517ec67fe052255bb2c49cb4439a878e565ec574b"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.253795 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" event={"ID":"c27f5305-5c04-401d-b53e-ca2df0999cfd","Type":"ContainerStarted","Data":"57421f7555ba7c083e77dad764cd27a2eb19172b6227ab2b7a632b4d139f53df"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.267761 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" event={"ID":"04af16a5-b153-433f-9c39-859c16167b0f","Type":"ContainerStarted","Data":"ac7de5eb645e3e8e501cf49d6507e29eb94961045c9abfd13bf4d9f07e17fcc0"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.268444 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.278473 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" podStartSLOduration=4.563668849 podStartE2EDuration="41.278454572s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.188515477 +0000 UTC m=+847.036121898" lastFinishedPulling="2025-11-28 15:39:13.9033012 +0000 UTC m=+883.750907621" observedRunningTime="2025-11-28 15:39:15.249027783 +0000 UTC m=+885.096634204" watchObservedRunningTime="2025-11-28 15:39:15.278454572 +0000 UTC m=+885.126060993"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.283714 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" event={"ID":"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6","Type":"ContainerStarted","Data":"565e00ae0ca3d6e0d2fa7a941039000ae3eb554a14eaa3fd797fdfcc12965f33"}
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.672842 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" podStartSLOduration=4.931248135 podStartE2EDuration="41.672827358s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.161723417 +0000 UTC m=+847.009329838" lastFinishedPulling="2025-11-28 15:39:13.90330264 +0000 UTC m=+883.750909061" observedRunningTime="2025-11-28 15:39:15.640988545 +0000 UTC m=+885.488594966" watchObservedRunningTime="2025-11-28 15:39:15.672827358 +0000 UTC m=+885.520433779"
Nov 28 15:39:15 crc kubenswrapper[4647]: I1128 15:39:15.673058 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" podStartSLOduration=5.036901724 podStartE2EDuration="41.673053444s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.236374695 +0000 UTC m=+847.083981116" lastFinishedPulling="2025-11-28 15:39:13.872526415 +0000 UTC m=+883.720132836" observedRunningTime="2025-11-28 15:39:15.66801134 +0000 UTC m=+885.515617761" watchObservedRunningTime="2025-11-28 15:39:15.673053444 +0000 UTC m=+885.520659855"
Nov 28 15:39:16 crc kubenswrapper[4647]: E1128 15:39:16.089930 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" podUID="7ff4e4d2-ff33-484b-bc15-f0192f009688"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.292851 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" event={"ID":"7760330a-6914-44a7-9fa5-aa6e6478506a","Type":"ContainerStarted","Data":"143a0ea500a72f2cc08b0a2a5da2439ebccdc7646e1a983dd207bbafc7414ee1"}
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.293249 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.294592 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" event={"ID":"f71ac6d8-b917-43a7-a35c-dce863f16280","Type":"ContainerStarted","Data":"65ed813c391699e51f5fb225971d360be2e6a6aa18b22c740b564c5cf4fc0f6c"}
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.294996 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.297152 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" event={"ID":"d32856fc-f28a-4e36-9e9b-0d09486b8a09","Type":"ContainerStarted","Data":"bf513b5d7392b01df225e4a249314edd1a9b929de81821420bd153baa092217b"}
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.297179 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.298497 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.323164 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" podStartSLOduration=11.471629249 podStartE2EDuration="42.323138353s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.80807572 +0000 UTC m=+846.655682141" lastFinishedPulling="2025-11-28 15:39:07.659584824 +0000 UTC m=+877.507191245" observedRunningTime="2025-11-28 15:39:16.31776549 +0000 UTC m=+886.165371921" watchObservedRunningTime="2025-11-28 15:39:16.323138353 +0000 UTC m=+886.170744774"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.345624 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" podStartSLOduration=11.761781965 podStartE2EDuration="42.345600038s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.076192632 +0000 UTC m=+846.923799053" lastFinishedPulling="2025-11-28 15:39:07.660010705 +0000 UTC m=+877.507617126" observedRunningTime="2025-11-28 15:39:16.337998776 +0000 UTC m=+886.185605197" watchObservedRunningTime="2025-11-28 15:39:16.345600038 +0000 UTC m=+886.193206459"
Nov 28 15:39:16 crc kubenswrapper[4647]: E1128 15:39:16.383042 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" podUID="7ff4e4d2-ff33-484b-bc15-f0192f009688"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.385239 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" podStartSLOduration=11.704335952 podStartE2EDuration="42.385223177s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.978960056 +0000 UTC m=+846.826566477" lastFinishedPulling="2025-11-28 15:39:07.659847281 +0000 UTC m=+877.507453702" observedRunningTime="2025-11-28 15:39:16.383856361 +0000 UTC m=+886.231462782" watchObservedRunningTime="2025-11-28 15:39:16.385223177 +0000 UTC m=+886.232829598"
Nov 28 15:39:16 crc kubenswrapper[4647]: I1128 15:39:16.418478 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" podStartSLOduration=11.70766347 podStartE2EDuration="42.418451347s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.948568011 +0000 UTC m=+846.796174432" lastFinishedPulling="2025-11-28 15:39:07.659355888 +0000 UTC m=+877.506962309" observedRunningTime="2025-11-28 15:39:16.414162964 +0000 UTC m=+886.261769385" watchObservedRunningTime="2025-11-28 15:39:16.418451347 +0000 UTC m=+886.266057768"
Nov 28 15:39:17 crc kubenswrapper[4647]: I1128 15:39:17.022954 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:39:17 crc kubenswrapper[4647]: I1128 15:39:17.023038 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:39:17 crc kubenswrapper[4647]: I1128 15:39:17.310070 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" event={"ID":"a4c112f9-f801-4aec-b715-72b336978342","Type":"ContainerStarted","Data":"91d6be61a1a3d527004d8202567c600e6791aa6307e29ff6da271e76f3f623b7"}
Nov 28 15:39:17 crc kubenswrapper[4647]: I1128 15:39:17.311934 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd"
Nov 28 15:39:17 crc kubenswrapper[4647]: I1128 15:39:17.344265 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" podStartSLOduration=3.986334489 podStartE2EDuration="43.344238959s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.843362865 +0000 UTC m=+846.690969286" lastFinishedPulling="2025-11-28 15:39:16.201267335 +0000 UTC m=+886.048873756" observedRunningTime="2025-11-28 15:39:17.337069309 +0000 UTC m=+887.184675740" watchObservedRunningTime="2025-11-28 15:39:17.344238959 +0000 UTC m=+887.191845380"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.135503 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"]
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.137322 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.158604 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"]
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.317695 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" event={"ID":"6576f1dc-a847-446f-a228-d287036b2d56","Type":"ContainerStarted","Data":"6e4977435270d8f75549a287882fa7c7b611b0c065fac973e70ef35de1f06d7e"}
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.319455 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" event={"ID":"5f72c046-071d-4e1c-8e12-6574bed76f27","Type":"ContainerStarted","Data":"5183fa0b8b8a42f810bd8ee683bcd05f5b671b5e671c3a6fd63706d9bfc1ca3b"}
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.329638 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.329726 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.329848 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxdxx\" (UniqueName: \"kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.337536 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" podStartSLOduration=4.550967204 podStartE2EDuration="44.337519868s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.113805278 +0000 UTC m=+846.961411699" lastFinishedPulling="2025-11-28 15:39:16.900357942 +0000 UTC m=+886.747964363" observedRunningTime="2025-11-28 15:39:18.334048856 +0000 UTC m=+888.181655277" watchObservedRunningTime="2025-11-28 15:39:18.337519868 +0000 UTC m=+888.185126289"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.431237 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxdxx\" (UniqueName: \"kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.431350 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.431396 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.431866 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.432649 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.457083 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxdxx\" (UniqueName: \"kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx\") pod \"certified-operators-2p9wb\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " pod="openshift-marketplace/certified-operators-2p9wb"
Nov 28 15:39:18 crc kubenswrapper[4647]: I1128 15:39:18.751209 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.327023 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" event={"ID":"873a1114-80f7-43f8-b6de-b69a7a152411","Type":"ContainerStarted","Data":"bba6c60da46ff4d3e55d67be142dd644cb2a6e17e4211fad3696f4f24470e818"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.327511 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.329260 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" event={"ID":"6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6","Type":"ContainerStarted","Data":"e9dc7dde10ded4b7366c3790ed1643451c31d07123a9f91f26770189867a77d1"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.329354 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.330563 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" event={"ID":"c27f5305-5c04-401d-b53e-ca2df0999cfd","Type":"ContainerStarted","Data":"a06cb89f56966c6da217728ef0a63ad56d99d2f36e28cc07b28f4ef8726d009d"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.330678 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.332428 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" event={"ID":"022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619","Type":"ContainerStarted","Data":"dc8f67e9c1678cd457f64d98691c691e830fa1d2d7576d9a62bafea6ac4a9b05"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.332641 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.334503 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" event={"ID":"43fb88ed-c57b-412e-a210-49ce2e7f8848","Type":"ContainerStarted","Data":"bd770f94d27af90b29b14094a86bf396ff605775f740fa51f7a12a67704bf254"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.334628 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.336319 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" event={"ID":"205cebfd-f183-486f-965f-ab494cae35dd","Type":"ContainerStarted","Data":"d7e185ca750205897c580807d4414e04ce45cede4d8833aa9f2b0efd790a2a4b"} Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.336444 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.336471 4647 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.336616 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.356145 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" podStartSLOduration=5.651634586 podStartE2EDuration="45.356129226s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.772847184 +0000 UTC m=+847.620453605" lastFinishedPulling="2025-11-28 15:39:17.477341804 +0000 UTC m=+887.324948245" observedRunningTime="2025-11-28 15:39:19.354617716 +0000 UTC m=+889.202224137" watchObservedRunningTime="2025-11-28 15:39:19.356129226 +0000 UTC m=+889.203735647" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.374867 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" podStartSLOduration=4.328911682 podStartE2EDuration="45.374847422s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.439299133 +0000 UTC m=+846.286905554" lastFinishedPulling="2025-11-28 15:39:17.485234863 +0000 UTC m=+887.332841294" observedRunningTime="2025-11-28 15:39:19.37440084 +0000 UTC m=+889.222007261" watchObservedRunningTime="2025-11-28 15:39:19.374847422 +0000 UTC m=+889.222453843" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.390990 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" podStartSLOduration=4.867149887 podStartE2EDuration="45.390972839s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.961245667 +0000 UTC m=+846.808852088" lastFinishedPulling="2025-11-28 15:39:17.485068609 +0000 UTC m=+887.332675040" observedRunningTime="2025-11-28 15:39:19.387806095 +0000 UTC m=+889.235412516" watchObservedRunningTime="2025-11-28 15:39:19.390972839 +0000 UTC m=+889.238579260" Nov 28 15:39:19 crc kubenswrapper[4647]: E1128 15:39:19.396237 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podUID="c2af1d24-9d02-4f14-95b7-3875382cb095" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.407866 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" podStartSLOduration=6.18712994 podStartE2EDuration="45.407849456s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.982612543 +0000 UTC m=+846.830218964" lastFinishedPulling="2025-11-28 15:39:16.203332059 +0000 UTC m=+886.050938480" observedRunningTime="2025-11-28 15:39:19.403332827 +0000 UTC m=+889.250939248" watchObservedRunningTime="2025-11-28 15:39:19.407849456 +0000 UTC m=+889.255455877" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.427400 4647 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" podStartSLOduration=4.899039442 podStartE2EDuration="45.427384524s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.948939601 +0000 UTC m=+846.796546022" lastFinishedPulling="2025-11-28 15:39:17.477284673 +0000 UTC m=+887.324891104" observedRunningTime="2025-11-28 15:39:19.426895621 +0000 UTC m=+889.274502042" watchObservedRunningTime="2025-11-28 15:39:19.427384524 +0000 UTC m=+889.274990945" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.443736 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" podStartSLOduration=4.853378063 podStartE2EDuration="45.443702896s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.895761963 +0000 UTC m=+846.743368384" lastFinishedPulling="2025-11-28 15:39:17.486086786 +0000 UTC m=+887.333693217" observedRunningTime="2025-11-28 15:39:19.440592504 +0000 UTC m=+889.288198925" watchObservedRunningTime="2025-11-28 15:39:19.443702896 +0000 UTC m=+889.291309317" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.463668 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" podStartSLOduration=4.4662393080000005 podStartE2EDuration="45.463651274s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:36.487651943 +0000 UTC m=+846.335258364" lastFinishedPulling="2025-11-28 15:39:17.485063899 +0000 UTC m=+887.332670330" observedRunningTime="2025-11-28 15:39:19.457886852 +0000 UTC m=+889.305493263" watchObservedRunningTime="2025-11-28 15:39:19.463651274 +0000 UTC m=+889.311257695" Nov 28 15:39:19 crc kubenswrapper[4647]: I1128 15:39:19.490625 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"] Nov 28 15:39:20 crc kubenswrapper[4647]: I1128 15:39:20.380233 4647 generic.go:334] "Generic (PLEG): container finished" podID="7410e071-f024-4355-b913-d7910b75ad42" containerID="8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737" exitCode=0 Nov 28 15:39:20 crc kubenswrapper[4647]: I1128 15:39:20.382003 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerDied","Data":"8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737"} Nov 28 15:39:20 crc kubenswrapper[4647]: I1128 15:39:20.382073 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerStarted","Data":"552adb33ac15bb0ea5852581bd245cbf72ca44330e386afa1dd851ae199c3091"} Nov 28 15:39:22 crc kubenswrapper[4647]: I1128 15:39:22.920642 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:22 crc kubenswrapper[4647]: I1128 15:39:22.924884 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:22 crc kubenswrapper[4647]: I1128 15:39:22.929224 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.073003 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.073453 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45r6k\" (UniqueName: \"kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.073520 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.174349 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45r6k\" (UniqueName: \"kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.174630 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.174753 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.175170 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.175206 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.197559 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-45r6k\" (UniqueName: \"kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k\") pod \"redhat-operators-klqmn\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.273037 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.559076 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:23 crc kubenswrapper[4647]: W1128 15:39:23.567358 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod666576c6_4d97_4bb5_a991_a89071ebff6d.slice/crio-3a82ea91951524cf8ecc23527387f394b83547e77678fd9c6a7202dc095589e5 WatchSource:0}: Error finding container 3a82ea91951524cf8ecc23527387f394b83547e77678fd9c6a7202dc095589e5: Status 404 returned error can't find the container with id 3a82ea91951524cf8ecc23527387f394b83547e77678fd9c6a7202dc095589e5 Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.931279 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" event={"ID":"1a8f857d-6498-42ba-bbc5-2bb5b2896c6e","Type":"ContainerStarted","Data":"b03575083eaee5933fbe7c85c7ffdb79b69b5ad0982727753a021b0a5355d058"} Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.934069 4647 generic.go:334] "Generic (PLEG): container finished" podID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerID="fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e" exitCode=0 Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.934158 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerDied","Data":"fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e"} Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.934230 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerStarted","Data":"3a82ea91951524cf8ecc23527387f394b83547e77678fd9c6a7202dc095589e5"} Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.939152 4647 generic.go:334] "Generic (PLEG): container finished" podID="7410e071-f024-4355-b913-d7910b75ad42" containerID="020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea" exitCode=0 Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.939206 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerDied","Data":"020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea"} Nov 28 15:39:23 crc kubenswrapper[4647]: I1128 15:39:23.955555 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-kvck7" podStartSLOduration=3.202429812 podStartE2EDuration="48.955535449s" podCreationTimestamp="2025-11-28 15:38:35 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.256551049 +0000 UTC m=+847.104157470" lastFinishedPulling="2025-11-28 15:39:23.009656686 +0000 UTC m=+892.857263107" observedRunningTime="2025-11-28 15:39:23.951527703 +0000 UTC m=+893.799134124" 
watchObservedRunningTime="2025-11-28 15:39:23.955535449 +0000 UTC m=+893.803141870" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.516015 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-s88h5" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.527242 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-qbdbz" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.546558 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-687dh" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.591371 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-cpbpp" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.642782 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-w2446" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.861104 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-kdwxt" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.903521 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-vl88c" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.930709 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-x2ggd" Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.947776 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerStarted","Data":"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773"} Nov 28 15:39:24 crc kubenswrapper[4647]: I1128 15:39:24.958795 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerStarted","Data":"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3"} Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.032815 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-4tgc6" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.047011 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2p9wb" podStartSLOduration=2.981328853 podStartE2EDuration="7.046995979s" podCreationTimestamp="2025-11-28 15:39:18 +0000 UTC" firstStartedPulling="2025-11-28 15:39:20.383529079 +0000 UTC m=+890.231135540" lastFinishedPulling="2025-11-28 15:39:24.449196255 +0000 UTC m=+894.296802666" observedRunningTime="2025-11-28 15:39:25.020760634 +0000 UTC m=+894.868367055" watchObservedRunningTime="2025-11-28 15:39:25.046995979 +0000 UTC m=+894.894602400" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.103606 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-58879495c-d9tcm" Nov 28 15:39:25 crc 
kubenswrapper[4647]: I1128 15:39:25.124494 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-8szbq" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.205010 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-5992m" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.412394 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-2v9x8" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.697480 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-rk4n7" Nov 28 15:39:25 crc kubenswrapper[4647]: I1128 15:39:25.707777 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-zqz4k" Nov 28 15:39:26 crc kubenswrapper[4647]: I1128 15:39:26.218971 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-v9m4v" Nov 28 15:39:26 crc kubenswrapper[4647]: E1128 15:39:26.404027 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podUID="577fb6a4-bb39-4df2-b161-04b2ac2f44d4" Nov 28 15:39:27 crc kubenswrapper[4647]: I1128 15:39:27.180173 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-zg7rq" Nov 28 15:39:27 crc kubenswrapper[4647]: E1128 15:39:27.395375 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podUID="93200d81-c9c3-4d5e-8406-112eef462119" Nov 28 15:39:28 crc kubenswrapper[4647]: I1128 15:39:28.000551 4647 generic.go:334] "Generic (PLEG): container finished" podID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerID="edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773" exitCode=0 Nov 28 15:39:28 crc kubenswrapper[4647]: I1128 15:39:28.000605 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerDied","Data":"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773"} Nov 28 15:39:28 crc kubenswrapper[4647]: I1128 15:39:28.752250 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:28 crc kubenswrapper[4647]: I1128 15:39:28.752293 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:28 crc kubenswrapper[4647]: I1128 15:39:28.791515 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:29 crc kubenswrapper[4647]: I1128 15:39:29.098041 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:30 crc kubenswrapper[4647]: I1128 15:39:30.691719 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"] Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.031968 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" event={"ID":"7ff4e4d2-ff33-484b-bc15-f0192f009688","Type":"ContainerStarted","Data":"048b474732b51335d46c0c6037fecadffd04cb5361b7bf76601867e815bd3658"} Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.032318 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.035601 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerStarted","Data":"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0"} Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.035873 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2p9wb" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="registry-server" containerID="cri-o://11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3" gracePeriod=2 Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.056854 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" podStartSLOduration=3.847199894 podStartE2EDuration="57.056831999s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.053305766 +0000 UTC m=+846.900912187" lastFinishedPulling="2025-11-28 15:39:30.262937871 +0000 UTC m=+900.110544292" observedRunningTime="2025-11-28 15:39:31.05044622 +0000 UTC m=+900.898052651" watchObservedRunningTime="2025-11-28 15:39:31.056831999 +0000 UTC m=+900.904438430" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.087255 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-klqmn" podStartSLOduration=2.979964223 podStartE2EDuration="9.087234984s" podCreationTimestamp="2025-11-28 15:39:22 +0000 UTC" firstStartedPulling="2025-11-28 15:39:23.935816497 +0000 UTC m=+893.783422918" lastFinishedPulling="2025-11-28 15:39:30.043087248 +0000 UTC m=+899.890693679" observedRunningTime="2025-11-28 15:39:31.086189137 +0000 UTC m=+900.933795568" watchObservedRunningTime="2025-11-28 15:39:31.087234984 +0000 UTC m=+900.934841405" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.435276 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.636952 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content\") pod \"7410e071-f024-4355-b913-d7910b75ad42\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.637063 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxdxx\" (UniqueName: \"kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx\") pod \"7410e071-f024-4355-b913-d7910b75ad42\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.638262 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities\") pod \"7410e071-f024-4355-b913-d7910b75ad42\" (UID: \"7410e071-f024-4355-b913-d7910b75ad42\") " Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.639017 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities" (OuterVolumeSpecName: "utilities") pod "7410e071-f024-4355-b913-d7910b75ad42" (UID: "7410e071-f024-4355-b913-d7910b75ad42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.643102 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx" (OuterVolumeSpecName: "kube-api-access-nxdxx") pod "7410e071-f024-4355-b913-d7910b75ad42" (UID: "7410e071-f024-4355-b913-d7910b75ad42"). InnerVolumeSpecName "kube-api-access-nxdxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.699932 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7410e071-f024-4355-b913-d7910b75ad42" (UID: "7410e071-f024-4355-b913-d7910b75ad42"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.739701 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.739757 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxdxx\" (UniqueName: \"kubernetes.io/projected/7410e071-f024-4355-b913-d7910b75ad42-kube-api-access-nxdxx\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4647]: I1128 15:39:31.739772 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7410e071-f024-4355-b913-d7910b75ad42-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.044346 4647 generic.go:334] "Generic (PLEG): container finished" podID="7410e071-f024-4355-b913-d7910b75ad42" containerID="11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3" exitCode=0 Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.044393 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerDied","Data":"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3"} Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.044478 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2p9wb" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.044507 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2p9wb" event={"ID":"7410e071-f024-4355-b913-d7910b75ad42","Type":"ContainerDied","Data":"552adb33ac15bb0ea5852581bd245cbf72ca44330e386afa1dd851ae199c3091"} Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.044533 4647 scope.go:117] "RemoveContainer" containerID="11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.070895 4647 scope.go:117] "RemoveContainer" containerID="020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.083380 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"] Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.088398 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2p9wb"] Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.097365 4647 scope.go:117] "RemoveContainer" containerID="8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.113133 4647 scope.go:117] "RemoveContainer" containerID="11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3" Nov 28 15:39:32 crc kubenswrapper[4647]: E1128 15:39:32.114045 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3\": container with ID starting with 11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3 not found: ID does not exist" containerID="11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.114110 
4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3"} err="failed to get container status \"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3\": rpc error: code = NotFound desc = could not find container \"11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3\": container with ID starting with 11a2ddd9df3dee410e497665ff079a914f7cca8e27ab0e6b6c25d49bf2a0d9c3 not found: ID does not exist" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.114157 4647 scope.go:117] "RemoveContainer" containerID="020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea" Nov 28 15:39:32 crc kubenswrapper[4647]: E1128 15:39:32.114589 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea\": container with ID starting with 020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea not found: ID does not exist" containerID="020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.114632 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea"} err="failed to get container status \"020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea\": rpc error: code = NotFound desc = could not find container \"020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea\": container with ID starting with 020c2a22aacd3f797b796a0824766b5e381f06978464a12343a2f8ed8c072fea not found: ID does not exist" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.114664 4647 scope.go:117] "RemoveContainer" containerID="8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737" Nov 28 15:39:32 crc kubenswrapper[4647]: E1128 15:39:32.114977 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737\": container with ID starting with 8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737 not found: ID does not exist" containerID="8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.115909 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737"} err="failed to get container status \"8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737\": rpc error: code = NotFound desc = could not find container \"8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737\": container with ID starting with 8e6139ec03e9d6385882c184549032defa3737eb896f10012df92b09e9c9e737 not found: ID does not exist" Nov 28 15:39:32 crc kubenswrapper[4647]: I1128 15:39:32.406756 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7410e071-f024-4355-b913-d7910b75ad42" path="/var/lib/kubelet/pods/7410e071-f024-4355-b913-d7910b75ad42/volumes" Nov 28 15:39:33 crc kubenswrapper[4647]: I1128 15:39:33.273351 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:33 crc kubenswrapper[4647]: I1128 15:39:33.274442 4647 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.322894 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-klqmn" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="registry-server" probeResult="failure" output=< Nov 28 15:39:34 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 15:39:34 crc kubenswrapper[4647]: > Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.902286 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:34 crc kubenswrapper[4647]: E1128 15:39:34.903239 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="registry-server" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.903279 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="registry-server" Nov 28 15:39:34 crc kubenswrapper[4647]: E1128 15:39:34.903348 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="extract-utilities" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.903357 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="extract-utilities" Nov 28 15:39:34 crc kubenswrapper[4647]: E1128 15:39:34.903380 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="extract-content" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.903388 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="extract-content" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.903657 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="7410e071-f024-4355-b913-d7910b75ad42" containerName="registry-server" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.905006 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:34 crc kubenswrapper[4647]: I1128 15:39:34.930350 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.085583 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6462\" (UniqueName: \"kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.085636 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.085669 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.187871 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6462\" (UniqueName: \"kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.187974 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.188296 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.189245 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.189394 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.238991 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-k6462\" (UniqueName: \"kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462\") pod \"community-operators-zkffl\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.301758 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-jnnvb" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.524694 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:35 crc kubenswrapper[4647]: I1128 15:39:35.828381 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.078376 4647 generic.go:334] "Generic (PLEG): container finished" podID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerID="844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc" exitCode=0 Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.078495 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerDied","Data":"844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc"} Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.078535 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerStarted","Data":"888c9ca3d18f1a10a92352d21760738f3110afd7d0e1892ddc308a2258901e80"} Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.082526 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" event={"ID":"c2af1d24-9d02-4f14-95b7-3875382cb095","Type":"ContainerStarted","Data":"679404254cef1738ec24cd66a042470c061a5068e49f07b2075da1206064559f"} Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.082802 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:39:36 crc kubenswrapper[4647]: I1128 15:39:36.117310 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" podStartSLOduration=4.367797713 podStartE2EDuration="1m2.117289014s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.236388425 +0000 UTC m=+847.083994846" lastFinishedPulling="2025-11-28 15:39:34.985879726 +0000 UTC m=+904.833486147" observedRunningTime="2025-11-28 15:39:36.113911825 +0000 UTC m=+905.961518246" watchObservedRunningTime="2025-11-28 15:39:36.117289014 +0000 UTC m=+905.964895435" Nov 28 15:39:38 crc kubenswrapper[4647]: I1128 15:39:38.101620 4647 generic.go:334] "Generic (PLEG): container finished" podID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerID="ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566" exitCode=0 Nov 28 15:39:38 crc kubenswrapper[4647]: I1128 15:39:38.101736 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" 
event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerDied","Data":"ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566"} Nov 28 15:39:39 crc kubenswrapper[4647]: I1128 15:39:39.124684 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" event={"ID":"577fb6a4-bb39-4df2-b161-04b2ac2f44d4","Type":"ContainerStarted","Data":"8a6985beb9fd1043418992af964c668a1d734aa6c5163a983d44e87b14ff8a7e"} Nov 28 15:39:39 crc kubenswrapper[4647]: I1128 15:39:39.124953 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:39:39 crc kubenswrapper[4647]: I1128 15:39:39.128171 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerStarted","Data":"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef"} Nov 28 15:39:39 crc kubenswrapper[4647]: I1128 15:39:39.147881 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" podStartSLOduration=4.379466431 podStartE2EDuration="1m5.147859923s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.12632012 +0000 UTC m=+846.973926541" lastFinishedPulling="2025-11-28 15:39:37.894713612 +0000 UTC m=+907.742320033" observedRunningTime="2025-11-28 15:39:39.141926826 +0000 UTC m=+908.989533247" watchObservedRunningTime="2025-11-28 15:39:39.147859923 +0000 UTC m=+908.995466354" Nov 28 15:39:39 crc kubenswrapper[4647]: I1128 15:39:39.169037 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zkffl" podStartSLOduration=2.568028943 podStartE2EDuration="5.169021444s" podCreationTimestamp="2025-11-28 15:39:34 +0000 UTC" firstStartedPulling="2025-11-28 15:39:36.08051091 +0000 UTC m=+905.928117341" lastFinishedPulling="2025-11-28 15:39:38.681503421 +0000 UTC m=+908.529109842" observedRunningTime="2025-11-28 15:39:39.166586069 +0000 UTC m=+909.014192510" watchObservedRunningTime="2025-11-28 15:39:39.169021444 +0000 UTC m=+909.016627865" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.246688 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.249291 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.261024 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.394587 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.394985 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48h7h\" (UniqueName: \"kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.395251 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.496169 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.496223 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48h7h\" (UniqueName: \"kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.496286 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.496834 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.497521 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.518731 4647 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-48h7h\" (UniqueName: \"kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h\") pod \"redhat-marketplace-qhzbc\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:41 crc kubenswrapper[4647]: I1128 15:39:41.570770 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:42 crc kubenswrapper[4647]: I1128 15:39:42.046958 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:42 crc kubenswrapper[4647]: I1128 15:39:42.156454 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerStarted","Data":"450d0ea9c03a2c76eb384b3d7077271a9b1f9562cd3310e743b04bdc46b106f4"} Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.166992 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" event={"ID":"93200d81-c9c3-4d5e-8406-112eef462119","Type":"ContainerStarted","Data":"f3b6a0ad67adfdc86d66b6aba628d660a9bcf6e524ad6c3ade7d349c0aa8fea0"} Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.167603 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.169752 4647 generic.go:334] "Generic (PLEG): container finished" podID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerID="89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106" exitCode=0 Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.169789 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerDied","Data":"89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106"} Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.198552 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" podStartSLOduration=4.395954425 podStartE2EDuration="1m9.198532655s" podCreationTimestamp="2025-11-28 15:38:34 +0000 UTC" firstStartedPulling="2025-11-28 15:38:37.236600371 +0000 UTC m=+847.084206792" lastFinishedPulling="2025-11-28 15:39:42.039178601 +0000 UTC m=+911.886785022" observedRunningTime="2025-11-28 15:39:43.18727784 +0000 UTC m=+913.034884271" watchObservedRunningTime="2025-11-28 15:39:43.198532655 +0000 UTC m=+913.046139086" Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.339986 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:43 crc kubenswrapper[4647]: I1128 15:39:43.388472 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.202103 4647 generic.go:334] "Generic (PLEG): container finished" podID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerID="a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8" exitCode=0 Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.202233 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerDied","Data":"a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8"} Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.526138 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.526385 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.586952 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.601699 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-pzkc9" Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.701911 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:45 crc kubenswrapper[4647]: I1128 15:39:45.702179 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-klqmn" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="registry-server" containerID="cri-o://0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0" gracePeriod=2 Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.104627 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.155202 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-bb86466d8-9dc64" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.220241 4647 generic.go:334] "Generic (PLEG): container finished" podID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerID="0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0" exitCode=0 Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.220334 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerDied","Data":"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0"} Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.220367 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-klqmn" event={"ID":"666576c6-4d97-4bb5-a991-a89071ebff6d","Type":"ContainerDied","Data":"3a82ea91951524cf8ecc23527387f394b83547e77678fd9c6a7202dc095589e5"} Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.220392 4647 scope.go:117] "RemoveContainer" containerID="0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.220589 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-klqmn" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.226521 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerStarted","Data":"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05"} Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.247800 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qhzbc" podStartSLOduration=2.701824212 podStartE2EDuration="5.247780294s" podCreationTimestamp="2025-11-28 15:39:41 +0000 UTC" firstStartedPulling="2025-11-28 15:39:43.172197165 +0000 UTC m=+913.019803596" lastFinishedPulling="2025-11-28 15:39:45.718153247 +0000 UTC m=+915.565759678" observedRunningTime="2025-11-28 15:39:46.244730294 +0000 UTC m=+916.092336725" watchObservedRunningTime="2025-11-28 15:39:46.247780294 +0000 UTC m=+916.095386725" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.249635 4647 scope.go:117] "RemoveContainer" containerID="edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.267106 4647 scope.go:117] "RemoveContainer" containerID="fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.270508 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content\") pod \"666576c6-4d97-4bb5-a991-a89071ebff6d\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.270587 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45r6k\" (UniqueName: \"kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k\") pod \"666576c6-4d97-4bb5-a991-a89071ebff6d\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.270608 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities\") pod \"666576c6-4d97-4bb5-a991-a89071ebff6d\" (UID: \"666576c6-4d97-4bb5-a991-a89071ebff6d\") " Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.271705 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities" (OuterVolumeSpecName: "utilities") pod "666576c6-4d97-4bb5-a991-a89071ebff6d" (UID: "666576c6-4d97-4bb5-a991-a89071ebff6d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.279694 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k" (OuterVolumeSpecName: "kube-api-access-45r6k") pod "666576c6-4d97-4bb5-a991-a89071ebff6d" (UID: "666576c6-4d97-4bb5-a991-a89071ebff6d"). InnerVolumeSpecName "kube-api-access-45r6k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.296461 4647 scope.go:117] "RemoveContainer" containerID="0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0" Nov 28 15:39:46 crc kubenswrapper[4647]: E1128 15:39:46.296958 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0\": container with ID starting with 0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0 not found: ID does not exist" containerID="0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.297001 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0"} err="failed to get container status \"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0\": rpc error: code = NotFound desc = could not find container \"0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0\": container with ID starting with 0b3ae1c20fbe56fc2f48d4ee3f00eea785c3e1179a9f07bbc9868913cab3f3c0 not found: ID does not exist" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.297024 4647 scope.go:117] "RemoveContainer" containerID="edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773" Nov 28 15:39:46 crc kubenswrapper[4647]: E1128 15:39:46.297915 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773\": container with ID starting with edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773 not found: ID does not exist" containerID="edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.297976 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773"} err="failed to get container status \"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773\": rpc error: code = NotFound desc = could not find container \"edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773\": container with ID starting with edccc55cb7a0f9296ea3ecefd8339422f8747f7398eccdb40ba24c1d08fbc773 not found: ID does not exist" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.298015 4647 scope.go:117] "RemoveContainer" containerID="fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e" Nov 28 15:39:46 crc kubenswrapper[4647]: E1128 15:39:46.298573 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e\": container with ID starting with fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e not found: ID does not exist" containerID="fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.298598 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e"} err="failed to get container status \"fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e\": rpc error: code = NotFound desc = could not 
find container \"fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e\": container with ID starting with fac39ceee73c7d855c18f2feb234d7cd8fd1e67395ca7d8d132596f66fea806e not found: ID does not exist" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.299159 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.372277 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45r6k\" (UniqueName: \"kubernetes.io/projected/666576c6-4d97-4bb5-a991-a89071ebff6d-kube-api-access-45r6k\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.372308 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.377626 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "666576c6-4d97-4bb5-a991-a89071ebff6d" (UID: "666576c6-4d97-4bb5-a991-a89071ebff6d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.474735 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666576c6-4d97-4bb5-a991-a89071ebff6d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.546995 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:46 crc kubenswrapper[4647]: I1128 15:39:46.557911 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-klqmn"] Nov 28 15:39:47 crc kubenswrapper[4647]: I1128 15:39:47.022789 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:39:47 crc kubenswrapper[4647]: I1128 15:39:47.022847 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:39:48 crc kubenswrapper[4647]: I1128 15:39:48.095183 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:48 crc kubenswrapper[4647]: I1128 15:39:48.242087 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zkffl" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="registry-server" containerID="cri-o://a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef" gracePeriod=2 Nov 28 15:39:48 crc kubenswrapper[4647]: I1128 15:39:48.408761 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" path="/var/lib/kubelet/pods/666576c6-4d97-4bb5-a991-a89071ebff6d/volumes" Nov 28 15:39:49 
crc kubenswrapper[4647]: I1128 15:39:49.175765 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.220330 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities\") pod \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.220391 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content\") pod \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.220670 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6462\" (UniqueName: \"kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462\") pod \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\" (UID: \"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7\") " Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.221284 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities" (OuterVolumeSpecName: "utilities") pod "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" (UID: "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.229780 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462" (OuterVolumeSpecName: "kube-api-access-k6462") pod "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" (UID: "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7"). InnerVolumeSpecName "kube-api-access-k6462". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.254482 4647 generic.go:334] "Generic (PLEG): container finished" podID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerID="a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef" exitCode=0 Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.254705 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerDied","Data":"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef"} Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.254810 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkffl" event={"ID":"883d48c0-b0b4-4ff0-bc7f-f268543dcaa7","Type":"ContainerDied","Data":"888c9ca3d18f1a10a92352d21760738f3110afd7d0e1892ddc308a2258901e80"} Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.254937 4647 scope.go:117] "RemoveContainer" containerID="a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.255139 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zkffl" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.283691 4647 scope.go:117] "RemoveContainer" containerID="ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.294016 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" (UID: "883d48c0-b0b4-4ff0-bc7f-f268543dcaa7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.302944 4647 scope.go:117] "RemoveContainer" containerID="844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.322359 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6462\" (UniqueName: \"kubernetes.io/projected/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-kube-api-access-k6462\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.322697 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.322757 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.325628 4647 scope.go:117] "RemoveContainer" containerID="a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef" Nov 28 15:39:49 crc kubenswrapper[4647]: E1128 15:39:49.326974 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef\": container with ID starting with a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef not found: ID does not exist" containerID="a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.327084 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef"} err="failed to get container status \"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef\": rpc error: code = NotFound desc = could not find container \"a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef\": container with ID starting with a92f8279649f4ef5d705ffdf775316dc59f4a8d2455382da7bb34342a042edef not found: ID does not exist" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.327166 4647 scope.go:117] "RemoveContainer" containerID="ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566" Nov 28 15:39:49 crc kubenswrapper[4647]: E1128 15:39:49.327673 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566\": container with ID starting with ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566 not found: ID does not exist" 
containerID="ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.327765 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566"} err="failed to get container status \"ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566\": rpc error: code = NotFound desc = could not find container \"ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566\": container with ID starting with ac52ad817521dc0b8975dcb0172f51905794e56205650822a83f75bd0f793566 not found: ID does not exist" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.327857 4647 scope.go:117] "RemoveContainer" containerID="844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc" Nov 28 15:39:49 crc kubenswrapper[4647]: E1128 15:39:49.328249 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc\": container with ID starting with 844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc not found: ID does not exist" containerID="844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.328287 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc"} err="failed to get container status \"844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc\": rpc error: code = NotFound desc = could not find container \"844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc\": container with ID starting with 844dae098aeede834843b99c25fdbcbb891c8deb657453a857bfd8f1d19efecc not found: ID does not exist" Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.599466 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:49 crc kubenswrapper[4647]: I1128 15:39:49.604517 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zkffl"] Nov 28 15:39:50 crc kubenswrapper[4647]: I1128 15:39:50.402437 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" path="/var/lib/kubelet/pods/883d48c0-b0b4-4ff0-bc7f-f268543dcaa7/volumes" Nov 28 15:39:51 crc kubenswrapper[4647]: I1128 15:39:51.571664 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:51 crc kubenswrapper[4647]: I1128 15:39:51.571788 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:51 crc kubenswrapper[4647]: I1128 15:39:51.654316 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:52 crc kubenswrapper[4647]: I1128 15:39:52.314157 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:53 crc kubenswrapper[4647]: I1128 15:39:53.691144 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:54 crc kubenswrapper[4647]: I1128 15:39:54.294241 4647 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-marketplace/redhat-marketplace-qhzbc" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="registry-server" containerID="cri-o://3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05" gracePeriod=2 Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.225140 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.303297 4647 generic.go:334] "Generic (PLEG): container finished" podID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerID="3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05" exitCode=0 Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.303336 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qhzbc" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.303356 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerDied","Data":"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05"} Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.303397 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qhzbc" event={"ID":"a66b92fa-cce8-445f-8645-039f3991dd8a","Type":"ContainerDied","Data":"450d0ea9c03a2c76eb384b3d7077271a9b1f9562cd3310e743b04bdc46b106f4"} Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.303505 4647 scope.go:117] "RemoveContainer" containerID="3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.310266 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content\") pod \"a66b92fa-cce8-445f-8645-039f3991dd8a\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.310378 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48h7h\" (UniqueName: \"kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h\") pod \"a66b92fa-cce8-445f-8645-039f3991dd8a\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.310513 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities\") pod \"a66b92fa-cce8-445f-8645-039f3991dd8a\" (UID: \"a66b92fa-cce8-445f-8645-039f3991dd8a\") " Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.312494 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities" (OuterVolumeSpecName: "utilities") pod "a66b92fa-cce8-445f-8645-039f3991dd8a" (UID: "a66b92fa-cce8-445f-8645-039f3991dd8a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.317684 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h" (OuterVolumeSpecName: "kube-api-access-48h7h") pod "a66b92fa-cce8-445f-8645-039f3991dd8a" (UID: "a66b92fa-cce8-445f-8645-039f3991dd8a"). InnerVolumeSpecName "kube-api-access-48h7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.324931 4647 scope.go:117] "RemoveContainer" containerID="a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.344674 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a66b92fa-cce8-445f-8645-039f3991dd8a" (UID: "a66b92fa-cce8-445f-8645-039f3991dd8a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.351905 4647 scope.go:117] "RemoveContainer" containerID="89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.378511 4647 scope.go:117] "RemoveContainer" containerID="3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05" Nov 28 15:39:55 crc kubenswrapper[4647]: E1128 15:39:55.378906 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05\": container with ID starting with 3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05 not found: ID does not exist" containerID="3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.378946 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05"} err="failed to get container status \"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05\": rpc error: code = NotFound desc = could not find container \"3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05\": container with ID starting with 3cab75f2c4e881d34992bf846826024a590edc77a368b068f3ef941e9bac0b05 not found: ID does not exist" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.378973 4647 scope.go:117] "RemoveContainer" containerID="a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8" Nov 28 15:39:55 crc kubenswrapper[4647]: E1128 15:39:55.379308 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8\": container with ID starting with a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8 not found: ID does not exist" containerID="a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.379342 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8"} err="failed to get container status \"a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8\": rpc error: 
code = NotFound desc = could not find container \"a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8\": container with ID starting with a7d7e88b30b7cd57ea1c5ed4a0812dd4859f32b02923f1e4520a3f1cdf9b65a8 not found: ID does not exist" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.379355 4647 scope.go:117] "RemoveContainer" containerID="89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106" Nov 28 15:39:55 crc kubenswrapper[4647]: E1128 15:39:55.379590 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106\": container with ID starting with 89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106 not found: ID does not exist" containerID="89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.379627 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106"} err="failed to get container status \"89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106\": rpc error: code = NotFound desc = could not find container \"89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106\": container with ID starting with 89201f275a8d441f706fca3681255f2b01038310c8736b98c6ccdaba570c3106 not found: ID does not exist" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.413313 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.413344 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66b92fa-cce8-445f-8645-039f3991dd8a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.413356 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48h7h\" (UniqueName: \"kubernetes.io/projected/a66b92fa-cce8-445f-8645-039f3991dd8a-kube-api-access-48h7h\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.641232 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.650174 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qhzbc"] Nov 28 15:39:55 crc kubenswrapper[4647]: I1128 15:39:55.881455 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-f4jfb" Nov 28 15:39:56 crc kubenswrapper[4647]: I1128 15:39:56.406761 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" path="/var/lib/kubelet/pods/a66b92fa-cce8-445f-8645-039f3991dd8a/volumes" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.150913 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"] Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151803 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151818 4647 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151834 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151840 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151853 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151860 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151875 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151882 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151896 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151908 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151933 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151945 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="extract-utilities" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151959 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151965 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151977 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151984 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="extract-content" Nov 28 15:40:15 crc kubenswrapper[4647]: E1128 15:40:15.151992 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.151998 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.152128 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a66b92fa-cce8-445f-8645-039f3991dd8a" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: 
I1128 15:40:15.152146 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="666576c6-4d97-4bb5-a991-a89071ebff6d" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.152155 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="883d48c0-b0b4-4ff0-bc7f-f268543dcaa7" containerName="registry-server" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.153041 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.157847 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.158466 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-8wd44" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.158588 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.158780 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.163514 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"] Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.244258 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.244353 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wcxg\" (UniqueName: \"kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.276936 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"] Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.278740 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.283626 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"] Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.283735 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.345569 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.345621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.345854 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wcxg\" (UniqueName: \"kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.346016 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgfmk\" (UniqueName: \"kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.346106 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.347425 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.370688 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wcxg\" (UniqueName: \"kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg\") pod \"dnsmasq-dns-675f4bcbfc-zg2lv\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.446989 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 
15:40:15.447028 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.447077 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgfmk\" (UniqueName: \"kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.448372 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.448524 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.471971 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.474835 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgfmk\" (UniqueName: \"kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk\") pod \"dnsmasq-dns-78dd6ddcc-fl2lz\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:15 crc kubenswrapper[4647]: I1128 15:40:15.595490 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:16 crc kubenswrapper[4647]: I1128 15:40:16.114095 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"] Nov 28 15:40:16 crc kubenswrapper[4647]: I1128 15:40:16.118363 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:40:16 crc kubenswrapper[4647]: I1128 15:40:16.180529 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"] Nov 28 15:40:16 crc kubenswrapper[4647]: W1128 15:40:16.192780 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9307fb94_8cf4_4669_a01e_bd57d6878d44.slice/crio-4f4541dc1973284661c028af8255769b53379d406ec32a63caed2876151d9c51 WatchSource:0}: Error finding container 4f4541dc1973284661c028af8255769b53379d406ec32a63caed2876151d9c51: Status 404 returned error can't find the container with id 4f4541dc1973284661c028af8255769b53379d406ec32a63caed2876151d9c51 Nov 28 15:40:16 crc kubenswrapper[4647]: I1128 15:40:16.527111 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" event={"ID":"9307fb94-8cf4-4669-a01e-bd57d6878d44","Type":"ContainerStarted","Data":"4f4541dc1973284661c028af8255769b53379d406ec32a63caed2876151d9c51"} Nov 28 15:40:16 crc kubenswrapper[4647]: I1128 15:40:16.528784 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" event={"ID":"e5fed567-39f9-4096-96b7-e9a9c2bb562a","Type":"ContainerStarted","Data":"77f26ce93c4d63ca8a54681981b95c4f03dc4bbdbf06854045a1cdb4009269de"} Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.023092 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.023153 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.023201 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.023795 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.023860 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871" gracePeriod=600 Nov 28 15:40:17 crc kubenswrapper[4647]: 
I1128 15:40:17.562092 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871" exitCode=0
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.562509 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871"}
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.562589 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80"}
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.562635 4647 scope.go:117] "RemoveContainer" containerID="488a9380c8e2899d9c0f82c2839f811a24e73a2d247aaa352058aec582928a19"
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.929972 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"]
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.965175 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"]
Nov 28 15:40:17 crc kubenswrapper[4647]: I1128 15:40:17.969563 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.002694 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"]
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.027807 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl5dw\" (UniqueName: \"kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.027881 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.027910 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.132969 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.133065 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl5dw\" (UniqueName: \"kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.133113 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.133920 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.134444 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.157574 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl5dw\" (UniqueName: \"kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw\") pod \"dnsmasq-dns-666b6646f7-rmcsh\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.300058 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.372819 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"]
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.427262 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"]
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.428345 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.448490 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"]
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.542499 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snlkb\" (UniqueName: \"kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.542740 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.542806 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.644443 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.644892 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.644932 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snlkb\" (UniqueName: \"kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.646364 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.647046 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.668841 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snlkb\" (UniqueName: \"kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb\") pod \"dnsmasq-dns-57d769cc4f-2lcfv\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:18 crc kubenswrapper[4647]: I1128 15:40:18.768546 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.058747 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.154167 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.181629 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.184824 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.185008 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.185131 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.185228 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.189859 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-kcbph"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.189947 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.193823 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.200118 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.256723 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.256914 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.256994 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257037 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257140 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257164 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257211 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257230 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257273 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257299 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.257318 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jmtq\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.346747 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360602 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360663 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360690 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360716 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360740 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360768 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360795 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360818 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360847 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360866 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jmtq\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.360934 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.366998 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.368487 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.369226 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.371976 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.372019 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.372528 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.372756 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.375761 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.384106 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.391429 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.429879 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jmtq\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.443912 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.533023 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.551887 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.552936 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559053 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559084 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559090 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-r84c9"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559208 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559312 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559350 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.559365 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.569455 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.637651 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh" event={"ID":"8d2be4c3-c046-4f12-8ded-817122672db4","Type":"ContainerStarted","Data":"44fbfd8e9e94c6c4ad9a92ea3d0314ccd13f21082754694b693034b3658d75c5"}
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.656860 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" event={"ID":"61f83b6e-cf96-4134-b269-113b8ed46a48","Type":"ContainerStarted","Data":"c9a8fa09c4c48d68e10611d6fc002d69778f9a32efae49412b9df1e7aa6435fb"}
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671014 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671082 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6lsr\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671122 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671168 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671190 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671221 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671247 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671267 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671309 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671340 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.671379 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.779427 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6lsr\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780052 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780108 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780130 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780160 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780186 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780208 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780249 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780275 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780313 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.780361 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.782545 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.782658 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.783031 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.786811 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.789829 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.790067 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.794112 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.801232 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.805523 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.808738 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.818426 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6lsr\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.845803 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:19 crc kubenswrapper[4647]: I1128 15:40:19.925676 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.227246 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.533160 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.546175 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.549620 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.550228 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.550349 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.552055 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.554617 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.555208 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-xbchh"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.560899 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.638298 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.677777 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerStarted","Data":"f59bed3300d1f113f96b013f69923c6e42d92008a55573b8a7a6a4d18dd0a1c9"}
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.682461 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerStarted","Data":"683ec6eefa54f3ef671e6a174b2b046240f657da8b28c01a1c90bad69f3220c1"}
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704469 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-kolla-config\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704527 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vlrm\" (UniqueName: \"kubernetes.io/projected/63a87633-1166-4787-99ee-ec4a5fd02b87-kube-api-access-6vlrm\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704580 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704599 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-generated\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704616 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-secrets\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704637 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704667 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-operator-scripts\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704694 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-default\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.704714 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.806151 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.806213 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-operator-scripts\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.806275 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-default\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.806302 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.806453 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.808587 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-default\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.809805 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-kolla-config\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.810140 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vlrm\" (UniqueName: \"kubernetes.io/projected/63a87633-1166-4787-99ee-ec4a5fd02b87-kube-api-access-6vlrm\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.810739 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-operator-scripts\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.810874 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.810909 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-generated\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.810927 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-secrets\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.811354 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/63a87633-1166-4787-99ee-ec4a5fd02b87-config-data-generated\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.812980 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a87633-1166-4787-99ee-ec4a5fd02b87-kolla-config\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.823092 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.839478 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-secrets\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.843742 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a87633-1166-4787-99ee-ec4a5fd02b87-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.847593 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vlrm\" (UniqueName: \"kubernetes.io/projected/63a87633-1166-4787-99ee-ec4a5fd02b87-kube-api-access-6vlrm\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:20 crc kubenswrapper[4647]: I1128 15:40:20.870722 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"63a87633-1166-4787-99ee-ec4a5fd02b87\") " pod="openstack/openstack-galera-0"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.177843 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.960873 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.973096 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.975106 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.983960 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.985193 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-26gdq"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.988666 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.988910 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 28 15:40:21 crc kubenswrapper[4647]: I1128 15:40:21.996075 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 15:40:22 crc kubenswrapper[4647]: W1128 15:40:22.078044 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63a87633_1166_4787_99ee_ec4a5fd02b87.slice/crio-19ea7b2d93430011b7ebdc74f705fe225d426120c34940e44d491c9d744774a3 WatchSource:0}: Error finding container 19ea7b2d93430011b7ebdc74f705fe225d426120c34940e44d491c9d744774a3: Status 404 returned error can't find the container with id 19ea7b2d93430011b7ebdc74f705fe225d426120c34940e44d491c9d744774a3
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152375 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152763 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152791 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152815 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152835 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152855 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.152940 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrjrz\" (UniqueName: \"kubernetes.io/projected/0f876088-07c2-4cb0-8096-681aaf594d6a-kube-api-access-zrjrz\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.153031 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.153185 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254309 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254370 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254402 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254442 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254467 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254482 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254498 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254517 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrjrz\" (UniqueName: \"kubernetes.io/projected/0f876088-07c2-4cb0-8096-681aaf594d6a-kube-api-access-zrjrz\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.254541 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.255451 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.255703 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.260922 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.261400 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0f876088-07c2-4cb0-8096-681aaf594d6a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.261884 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0f876088-07c2-4cb0-8096-681aaf594d6a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.267627 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.270010 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.275157 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f876088-07c2-4cb0-8096-681aaf594d6a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.290145 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.291535 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrjrz\" (UniqueName: \"kubernetes.io/projected/0f876088-07c2-4cb0-8096-681aaf594d6a-kube-api-access-zrjrz\") pod \"openstack-cell1-galera-0\" (UID: \"0f876088-07c2-4cb0-8096-681aaf594d6a\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.324486 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.349768 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.351272 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.355903 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-dlqjf"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.355928 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.356298 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.456720 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kolla-config\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.456764 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-config-data\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.456783 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc2hw\" (UniqueName: \"kubernetes.io/projected/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kube-api-access-tc2hw\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.456801 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.456816 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.497072 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.559824 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kolla-config\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.559868 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-config-data\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0"
Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.559895 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc2hw\" (UniqueName:
\"kubernetes.io/projected/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kube-api-access-tc2hw\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.559911 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.559926 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.561689 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kolla-config\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.562155 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7a9cfb5-c9cd-45ee-906e-70926173aa87-config-data\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.568349 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-memcached-tls-certs\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.575111 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7a9cfb5-c9cd-45ee-906e-70926173aa87-combined-ca-bundle\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.598443 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc2hw\" (UniqueName: \"kubernetes.io/projected/b7a9cfb5-c9cd-45ee-906e-70926173aa87-kube-api-access-tc2hw\") pod \"memcached-0\" (UID: \"b7a9cfb5-c9cd-45ee-906e-70926173aa87\") " pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.716219 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 15:40:22 crc kubenswrapper[4647]: I1128 15:40:22.775918 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"63a87633-1166-4787-99ee-ec4a5fd02b87","Type":"ContainerStarted","Data":"19ea7b2d93430011b7ebdc74f705fe225d426120c34940e44d491c9d744774a3"} Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.245800 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:40:23 crc kubenswrapper[4647]: W1128 15:40:23.341557 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f876088_07c2_4cb0_8096_681aaf594d6a.slice/crio-c5eb02a53bab18b67e9045c9a97b305e99d012b7f0f3bd252d9cacf1d57ac8d6 WatchSource:0}: Error finding container c5eb02a53bab18b67e9045c9a97b305e99d012b7f0f3bd252d9cacf1d57ac8d6: Status 404 returned error can't find the container with id c5eb02a53bab18b67e9045c9a97b305e99d012b7f0f3bd252d9cacf1d57ac8d6 Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.398158 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 15:40:23 crc kubenswrapper[4647]: W1128 15:40:23.419551 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7a9cfb5_c9cd_45ee_906e_70926173aa87.slice/crio-f35a84d30589beac236fce4564c85c3c45c7be95bc13c01a2073cf8ed5fdba79 WatchSource:0}: Error finding container f35a84d30589beac236fce4564c85c3c45c7be95bc13c01a2073cf8ed5fdba79: Status 404 returned error can't find the container with id f35a84d30589beac236fce4564c85c3c45c7be95bc13c01a2073cf8ed5fdba79 Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.829862 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0f876088-07c2-4cb0-8096-681aaf594d6a","Type":"ContainerStarted","Data":"c5eb02a53bab18b67e9045c9a97b305e99d012b7f0f3bd252d9cacf1d57ac8d6"} Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.854582 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b7a9cfb5-c9cd-45ee-906e-70926173aa87","Type":"ContainerStarted","Data":"f35a84d30589beac236fce4564c85c3c45c7be95bc13c01a2073cf8ed5fdba79"} Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.971404 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.973051 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.978477 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-z6kwq" Nov 28 15:40:23 crc kubenswrapper[4647]: I1128 15:40:23.985261 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:40:24 crc kubenswrapper[4647]: I1128 15:40:24.095016 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt6xx\" (UniqueName: \"kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx\") pod \"kube-state-metrics-0\" (UID: \"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:40:24 crc kubenswrapper[4647]: I1128 15:40:24.197452 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt6xx\" (UniqueName: \"kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx\") pod \"kube-state-metrics-0\" (UID: \"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:40:24 crc kubenswrapper[4647]: I1128 15:40:24.228124 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt6xx\" (UniqueName: \"kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx\") pod \"kube-state-metrics-0\" (UID: \"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:40:24 crc kubenswrapper[4647]: I1128 15:40:24.317090 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:40:25 crc kubenswrapper[4647]: I1128 15:40:24.998629 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:40:25 crc kubenswrapper[4647]: W1128 15:40:25.086622 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ff96da1_31d9_4b0c_8fbc_32c25a416a5c.slice/crio-0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8 WatchSource:0}: Error finding container 0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8: Status 404 returned error can't find the container with id 0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8 Nov 28 15:40:25 crc kubenswrapper[4647]: I1128 15:40:25.963171 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c","Type":"ContainerStarted","Data":"0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8"} Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.797727 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.799638 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.813057 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.813316 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.813540 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-5lfvd" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.814203 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.814356 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.819655 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.914957 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zggw7\" (UniqueName: \"kubernetes.io/projected/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-kube-api-access-zggw7\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915023 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-config\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915080 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915214 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915301 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915359 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915403 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:27 crc kubenswrapper[4647]: I1128 15:40:27.915548 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.017788 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.017875 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.017931 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.017957 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zggw7\" (UniqueName: \"kubernetes.io/projected/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-kube-api-access-zggw7\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.018092 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-config\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.018175 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.018217 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.018283 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 
15:40:28.019476 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.020206 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.021288 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-config\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.021436 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.027003 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.039491 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.041023 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.042639 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zggw7\" (UniqueName: \"kubernetes.io/projected/cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0-kube-api-access-zggw7\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.049224 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-sb-0\" (UID: \"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.143028 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.147244 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-4psvg"] Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.148335 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.157022 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.157182 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.158278 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-bq9hs" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.190398 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg"] Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.222353 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-combined-ca-bundle\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223235 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfnq8\" (UniqueName: \"kubernetes.io/projected/fab755a9-f20f-4bc6-a7e2-353396a0ce74-kube-api-access-mfnq8\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223330 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223441 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run-ovn\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223527 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-ovn-controller-tls-certs\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-log-ovn\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.223705 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fab755a9-f20f-4bc6-a7e2-353396a0ce74-scripts\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.238201 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-vh9b5"] Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.242972 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326083 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfnq8\" (UniqueName: \"kubernetes.io/projected/fab755a9-f20f-4bc6-a7e2-353396a0ce74-kube-api-access-mfnq8\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326165 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-lib\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326193 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326216 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-log\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326244 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run-ovn\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326279 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-ovn-controller-tls-certs\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326306 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-etc-ovs\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326336 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-log-ovn\") pod 
\"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326359 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-run\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.326416 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fab755a9-f20f-4bc6-a7e2-353396a0ce74-scripts\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.330592 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czkgq\" (UniqueName: \"kubernetes.io/projected/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-kube-api-access-czkgq\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.330649 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-combined-ca-bundle\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.330683 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-scripts\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.329020 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run-ovn\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.329523 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-run\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.330097 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fab755a9-f20f-4bc6-a7e2-353396a0ce74-var-log-ovn\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.333066 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fab755a9-f20f-4bc6-a7e2-353396a0ce74-scripts\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.337700 4647 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vh9b5"] Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.339076 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-ovn-controller-tls-certs\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.347794 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfnq8\" (UniqueName: \"kubernetes.io/projected/fab755a9-f20f-4bc6-a7e2-353396a0ce74-kube-api-access-mfnq8\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.354111 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab755a9-f20f-4bc6-a7e2-353396a0ce74-combined-ca-bundle\") pod \"ovn-controller-4psvg\" (UID: \"fab755a9-f20f-4bc6-a7e2-353396a0ce74\") " pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432304 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czkgq\" (UniqueName: \"kubernetes.io/projected/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-kube-api-access-czkgq\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432419 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-scripts\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432488 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-lib\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-log\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432545 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-etc-ovs\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.432578 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-run\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.433445 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-run\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.433661 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-log\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.433727 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-etc-ovs\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.433909 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-var-lib\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.438244 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-scripts\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.450755 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czkgq\" (UniqueName: \"kubernetes.io/projected/a5d1c6f3-2c4f-4046-81fc-c2e210100c4b-kube-api-access-czkgq\") pod \"ovn-controller-ovs-vh9b5\" (UID: \"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b\") " pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.473398 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg" Nov 28 15:40:28 crc kubenswrapper[4647]: I1128 15:40:28.619312 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:40:29 crc kubenswrapper[4647]: I1128 15:40:29.448810 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg"] Nov 28 15:40:29 crc kubenswrapper[4647]: W1128 15:40:29.630446 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfab755a9_f20f_4bc6_a7e2_353396a0ce74.slice/crio-90ae3bd04a0c5cedbd396f7a411dbf8868b461eec73b173065b8a160f48f20c9 WatchSource:0}: Error finding container 90ae3bd04a0c5cedbd396f7a411dbf8868b461eec73b173065b8a160f48f20c9: Status 404 returned error can't find the container with id 90ae3bd04a0c5cedbd396f7a411dbf8868b461eec73b173065b8a160f48f20c9 Nov 28 15:40:29 crc kubenswrapper[4647]: I1128 15:40:29.775091 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:40:29 crc kubenswrapper[4647]: W1128 15:40:29.789057 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbf8b62e_4172_48bb_aeb8_a3f6e6cffdb0.slice/crio-3e41de3d78ee2d926aaa3d30e1ca960a6f5b6613d528e9e011bd443ec845ffff WatchSource:0}: Error finding container 3e41de3d78ee2d926aaa3d30e1ca960a6f5b6613d528e9e011bd443ec845ffff: Status 404 returned error can't find the container with id 3e41de3d78ee2d926aaa3d30e1ca960a6f5b6613d528e9e011bd443ec845ffff Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.144547 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0","Type":"ContainerStarted","Data":"3e41de3d78ee2d926aaa3d30e1ca960a6f5b6613d528e9e011bd443ec845ffff"} Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.165236 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg" event={"ID":"fab755a9-f20f-4bc6-a7e2-353396a0ce74","Type":"ContainerStarted","Data":"90ae3bd04a0c5cedbd396f7a411dbf8868b461eec73b173065b8a160f48f20c9"} Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.183310 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vh9b5"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.314578 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-zql68"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.321049 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.324140 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.351348 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-zql68"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.388859 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.388901 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-config\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.388921 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-combined-ca-bundle\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.388940 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g6gc\" (UniqueName: \"kubernetes.io/projected/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-kube-api-access-7g6gc\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.388958 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovs-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.389472 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovn-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495752 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495808 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-config\") pod \"ovn-controller-metrics-zql68\" (UID: 
\"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495830 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-combined-ca-bundle\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495853 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g6gc\" (UniqueName: \"kubernetes.io/projected/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-kube-api-access-7g6gc\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495874 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovs-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.495910 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovn-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.496398 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovn-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.519496 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-ovs-rundir\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.524869 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.556240 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.557154 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-config\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.571455 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-combined-ca-bundle\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.659024 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g6gc\" (UniqueName: \"kubernetes.io/projected/083c881c-8e40-4d03-b4f1-91af7bcd2cd1-kube-api-access-7g6gc\") pod \"ovn-controller-metrics-zql68\" (UID: \"083c881c-8e40-4d03-b4f1-91af7bcd2cd1\") " pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.660476 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-zql68" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.760613 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.780684 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.782228 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.788325 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.793713 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.922004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.922101 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.922242 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:30 crc kubenswrapper[4647]: I1128 15:40:30.922313 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d66xf\" (UniqueName: \"kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.024198 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: 
\"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.024292 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.024322 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d66xf\" (UniqueName: \"kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.024386 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.025595 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.026087 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.026592 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.066630 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d66xf\" (UniqueName: \"kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf\") pod \"dnsmasq-dns-6bc7876d45-9wnbc\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.109962 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.164822 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.167496 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.171567 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.171686 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.171855 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.171908 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-2jdx9" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.182694 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.197480 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vh9b5" event={"ID":"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b","Type":"ContainerStarted","Data":"113dc3555664d7812e88c29df42d77e89c6c32847bcfb6e6dfa466d639b1c935"} Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.228668 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229289 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229329 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229371 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229568 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v27pn\" (UniqueName: \"kubernetes.io/projected/de3257de-cbde-4dca-89c7-21af1617cc66-kube-api-access-v27pn\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229617 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc 
kubenswrapper[4647]: I1128 15:40:31.229641 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-config\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.229715 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.291349 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-zql68"] Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332151 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332243 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332290 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332327 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332359 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332402 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v27pn\" (UniqueName: \"kubernetes.io/projected/de3257de-cbde-4dca-89c7-21af1617cc66-kube-api-access-v27pn\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332454 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332477 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-config\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332881 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.332935 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.333869 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.334465 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de3257de-cbde-4dca-89c7-21af1617cc66-config\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.341106 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.341106 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.356997 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3257de-cbde-4dca-89c7-21af1617cc66-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.358020 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: I1128 15:40:31.372241 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v27pn\" (UniqueName: \"kubernetes.io/projected/de3257de-cbde-4dca-89c7-21af1617cc66-kube-api-access-v27pn\") pod \"ovsdbserver-nb-0\" (UID: \"de3257de-cbde-4dca-89c7-21af1617cc66\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:31 crc kubenswrapper[4647]: 
I1128 15:40:31.495347 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:37 crc kubenswrapper[4647]: I1128 15:40:37.298273 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-zql68" event={"ID":"083c881c-8e40-4d03-b4f1-91af7bcd2cd1","Type":"ContainerStarted","Data":"eb99407255b2013ba81e443ca6a40431c618694c22da3001dab240aba7da888c"} Nov 28 15:40:50 crc kubenswrapper[4647]: E1128 15:40:50.456904 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 15:40:50 crc kubenswrapper[4647]: E1128 15:40:50.458191 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6vlrm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(63a87633-1166-4787-99ee-ec4a5fd02b87): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:50 crc kubenswrapper[4647]: E1128 15:40:50.460179 4647 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="63a87633-1166-4787-99ee-ec4a5fd02b87" Nov 28 15:40:51 crc kubenswrapper[4647]: E1128 15:40:51.463052 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="63a87633-1166-4787-99ee-ec4a5fd02b87" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.221116 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.221332 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6jmtq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(49c7e330-cae6-469f-9a44-7087cc112af1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.222740 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.243154 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.243306 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k6lsr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(27fe6c77-c0f2-4398-a337-133eaca78fb4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.245545 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.470993 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" Nov 28 15:40:52 crc kubenswrapper[4647]: E1128 15:40:52.475921 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.851465 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.851935 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bl5dw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-rmcsh_openstack(8d2be4c3-c046-4f12-8ded-817122672db4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.853196 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh" podUID="8d2be4c3-c046-4f12-8ded-817122672db4" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.978523 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.978849 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgfmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-fl2lz_openstack(e5fed567-39f9-4096-96b7-e9a9c2bb562a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.981179 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" podUID="e5fed567-39f9-4096-96b7-e9a9c2bb562a" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.996946 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.997172 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-snlkb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-2lcfv_openstack(61f83b6e-cf96-4134-b269-113b8ed46a48): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:54 crc kubenswrapper[4647]: E1128 15:40:54.998226 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" Nov 28 15:40:55 crc kubenswrapper[4647]: E1128 15:40:55.513437 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" Nov 28 15:40:57 crc kubenswrapper[4647]: E1128 15:40:57.377501 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:40:57 crc kubenswrapper[4647]: E1128 15:40:57.378473 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9wcxg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-zg2lv_openstack(9307fb94-8cf4-4669-a01e-bd57d6878d44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:40:57 crc kubenswrapper[4647]: E1128 15:40:57.380175 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" podUID="9307fb94-8cf4-4669-a01e-bd57d6878d44" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.843677 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.855177 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.950857 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl5dw\" (UniqueName: \"kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw\") pod \"8d2be4c3-c046-4f12-8ded-817122672db4\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.951019 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config\") pod \"8d2be4c3-c046-4f12-8ded-817122672db4\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.951141 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc\") pod \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.951185 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgfmk\" (UniqueName: \"kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk\") pod \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.951686 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e5fed567-39f9-4096-96b7-e9a9c2bb562a" (UID: "e5fed567-39f9-4096-96b7-e9a9c2bb562a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952019 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config" (OuterVolumeSpecName: "config") pod "e5fed567-39f9-4096-96b7-e9a9c2bb562a" (UID: "e5fed567-39f9-4096-96b7-e9a9c2bb562a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952150 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config" (OuterVolumeSpecName: "config") pod "8d2be4c3-c046-4f12-8ded-817122672db4" (UID: "8d2be4c3-c046-4f12-8ded-817122672db4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.951278 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config\") pod \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\" (UID: \"e5fed567-39f9-4096-96b7-e9a9c2bb562a\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952465 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc\") pod \"8d2be4c3-c046-4f12-8ded-817122672db4\" (UID: \"8d2be4c3-c046-4f12-8ded-817122672db4\") " Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952849 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952876 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952886 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5fed567-39f9-4096-96b7-e9a9c2bb562a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.952938 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d2be4c3-c046-4f12-8ded-817122672db4" (UID: "8d2be4c3-c046-4f12-8ded-817122672db4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.957245 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw" (OuterVolumeSpecName: "kube-api-access-bl5dw") pod "8d2be4c3-c046-4f12-8ded-817122672db4" (UID: "8d2be4c3-c046-4f12-8ded-817122672db4"). InnerVolumeSpecName "kube-api-access-bl5dw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:57 crc kubenswrapper[4647]: I1128 15:40:57.957843 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk" (OuterVolumeSpecName: "kube-api-access-sgfmk") pod "e5fed567-39f9-4096-96b7-e9a9c2bb562a" (UID: "e5fed567-39f9-4096-96b7-e9a9c2bb562a"). InnerVolumeSpecName "kube-api-access-sgfmk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.055587 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgfmk\" (UniqueName: \"kubernetes.io/projected/e5fed567-39f9-4096-96b7-e9a9c2bb562a-kube-api-access-sgfmk\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.055640 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d2be4c3-c046-4f12-8ded-817122672db4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.055656 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl5dw\" (UniqueName: \"kubernetes.io/projected/8d2be4c3-c046-4f12-8ded-817122672db4-kube-api-access-bl5dw\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.534972 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" event={"ID":"e5fed567-39f9-4096-96b7-e9a9c2bb562a","Type":"ContainerDied","Data":"77f26ce93c4d63ca8a54681981b95c4f03dc4bbdbf06854045a1cdb4009269de"} Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.535965 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-fl2lz" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.538836 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh" event={"ID":"8d2be4c3-c046-4f12-8ded-817122672db4","Type":"ContainerDied","Data":"44fbfd8e9e94c6c4ad9a92ea3d0314ccd13f21082754694b693034b3658d75c5"} Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.538949 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-rmcsh" Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.615639 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"] Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.624438 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-fl2lz"] Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.640052 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"] Nov 28 15:40:58 crc kubenswrapper[4647]: I1128 15:40:58.646636 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-rmcsh"] Nov 28 15:41:00 crc kubenswrapper[4647]: I1128 15:41:00.412367 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d2be4c3-c046-4f12-8ded-817122672db4" path="/var/lib/kubelet/pods/8d2be4c3-c046-4f12-8ded-817122672db4/volumes" Nov 28 15:41:00 crc kubenswrapper[4647]: I1128 15:41:00.413321 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5fed567-39f9-4096-96b7-e9a9c2bb562a" path="/var/lib/kubelet/pods/e5fed567-39f9-4096-96b7-e9a9c2bb562a/volumes" Nov 28 15:41:05 crc kubenswrapper[4647]: I1128 15:41:05.728666 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:41:05 crc kubenswrapper[4647]: I1128 15:41:05.883424 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.153565 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.216164 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config\") pod \"9307fb94-8cf4-4669-a01e-bd57d6878d44\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.216619 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wcxg\" (UniqueName: \"kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg\") pod \"9307fb94-8cf4-4669-a01e-bd57d6878d44\" (UID: \"9307fb94-8cf4-4669-a01e-bd57d6878d44\") " Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.216897 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config" (OuterVolumeSpecName: "config") pod "9307fb94-8cf4-4669-a01e-bd57d6878d44" (UID: "9307fb94-8cf4-4669-a01e-bd57d6878d44"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.217264 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9307fb94-8cf4-4669-a01e-bd57d6878d44-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.226493 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg" (OuterVolumeSpecName: "kube-api-access-9wcxg") pod "9307fb94-8cf4-4669-a01e-bd57d6878d44" (UID: "9307fb94-8cf4-4669-a01e-bd57d6878d44"). InnerVolumeSpecName "kube-api-access-9wcxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.320070 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wcxg\" (UniqueName: \"kubernetes.io/projected/9307fb94-8cf4-4669-a01e-bd57d6878d44-kube-api-access-9wcxg\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.640828 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" event={"ID":"9307fb94-8cf4-4669-a01e-bd57d6878d44","Type":"ContainerDied","Data":"4f4541dc1973284661c028af8255769b53379d406ec32a63caed2876151d9c51"} Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.640939 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zg2lv" Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.698390 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"] Nov 28 15:41:06 crc kubenswrapper[4647]: I1128 15:41:06.698466 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zg2lv"] Nov 28 15:41:07 crc kubenswrapper[4647]: E1128 15:41:07.487032 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Nov 28 15:41:07 crc kubenswrapper[4647]: E1128 15:41:07.487483 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n76h694h54bhd9h66bh95h554h589h674hb6h66chd8h96h66h68bh596h68fh554h5fdhf6h58h545h558h59dh685h686h76h584h597hdch64fh678q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovs-rundir,ReadOnly:true,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:true,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7g6gc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-metrics-zql68_openstack(083c881c-8e40-4d03-b4f1-91af7bcd2cd1): ErrImagePull: rpc error: code = Canceled desc = copying config: context 
canceled" logger="UnhandledError" Nov 28 15:41:07 crc kubenswrapper[4647]: E1128 15:41:07.488769 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-metrics-zql68" podUID="083c881c-8e40-4d03-b4f1-91af7bcd2cd1" Nov 28 15:41:07 crc kubenswrapper[4647]: I1128 15:41:07.648728 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"de3257de-cbde-4dca-89c7-21af1617cc66","Type":"ContainerStarted","Data":"a024a94f713c733b8b2f50cbaeb2b7fe4f455923e3fccdd70cf5e5160292beba"} Nov 28 15:41:07 crc kubenswrapper[4647]: I1128 15:41:07.649861 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" event={"ID":"6d088a31-8d23-4030-8398-de9b589b301c","Type":"ContainerStarted","Data":"59b3e7789d51f128b77956a57371dcade48a72efd398badebe7366fd21513f68"} Nov 28 15:41:07 crc kubenswrapper[4647]: E1128 15:41:07.651077 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovn-controller-metrics-zql68" podUID="083c881c-8e40-4d03-b4f1-91af7bcd2cd1" Nov 28 15:41:08 crc kubenswrapper[4647]: I1128 15:41:08.411100 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9307fb94-8cf4-4669-a01e-bd57d6878d44" path="/var/lib/kubelet/pods/9307fb94-8cf4-4669-a01e-bd57d6878d44/volumes" Nov 28 15:41:08 crc kubenswrapper[4647]: I1128 15:41:08.659259 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0f876088-07c2-4cb0-8096-681aaf594d6a","Type":"ContainerStarted","Data":"cc8f8b70325d66439b346ebd106ef921f3ff8d5c3280bb252a0c54dfba7dd205"} Nov 28 15:41:08 crc kubenswrapper[4647]: I1128 15:41:08.665349 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vh9b5" event={"ID":"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b","Type":"ContainerStarted","Data":"f1e790394add757b8aeef71317b80f4c18d5306b1f102ea7e95c08a3062d5551"} Nov 28 15:41:08 crc kubenswrapper[4647]: I1128 15:41:08.669018 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b7a9cfb5-c9cd-45ee-906e-70926173aa87","Type":"ContainerStarted","Data":"0aa91f824bae3fdf6013c2a1123111abf08b76df6496f0954a98260630d65ca6"} Nov 28 15:41:08 crc kubenswrapper[4647]: I1128 15:41:08.669800 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 15:41:09 crc kubenswrapper[4647]: E1128 15:41:09.055318 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 15:41:09 crc kubenswrapper[4647]: E1128 15:41:09.055376 4647 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 15:41:09 crc kubenswrapper[4647]: E1128 15:41:09.055566 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zt6xx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(4ff96da1-31d9-4b0c-8fbc-32c25a416a5c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:41:09 crc kubenswrapper[4647]: E1128 15:41:09.056763 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" Nov 28 15:41:09 crc kubenswrapper[4647]: I1128 15:41:09.686399 4647 generic.go:334] "Generic (PLEG): container finished" podID="a5d1c6f3-2c4f-4046-81fc-c2e210100c4b" containerID="f1e790394add757b8aeef71317b80f4c18d5306b1f102ea7e95c08a3062d5551" exitCode=0 Nov 28 15:41:09 crc kubenswrapper[4647]: I1128 15:41:09.686657 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vh9b5" event={"ID":"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b","Type":"ContainerDied","Data":"f1e790394add757b8aeef71317b80f4c18d5306b1f102ea7e95c08a3062d5551"} Nov 28 15:41:09 crc kubenswrapper[4647]: I1128 15:41:09.694661 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0","Type":"ContainerStarted","Data":"11e6239220088ef0e49d5724afae2ba6c3bdea30387ba6db58b42fd2e4ab67de"} Nov 28 15:41:09 crc kubenswrapper[4647]: I1128 15:41:09.701252 4647 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/openstack-galera-0" event={"ID":"63a87633-1166-4787-99ee-ec4a5fd02b87","Type":"ContainerStarted","Data":"a37dec4867423b811354db3e62c36120742cf4fbc035f99e2fff02abb669ff9f"} Nov 28 15:41:09 crc kubenswrapper[4647]: I1128 15:41:09.712498 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=16.198742073 podStartE2EDuration="47.712466056s" podCreationTimestamp="2025-11-28 15:40:22 +0000 UTC" firstStartedPulling="2025-11-28 15:40:23.424342394 +0000 UTC m=+953.271948805" lastFinishedPulling="2025-11-28 15:40:54.938066357 +0000 UTC m=+984.785672788" observedRunningTime="2025-11-28 15:41:08.728575574 +0000 UTC m=+998.576181995" watchObservedRunningTime="2025-11-28 15:41:09.712466056 +0000 UTC m=+999.560072467" Nov 28 15:41:09 crc kubenswrapper[4647]: E1128 15:41:09.716797 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.710367 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"de3257de-cbde-4dca-89c7-21af1617cc66","Type":"ContainerStarted","Data":"e30bcb2ced1b4754d19605a30eda8db84e43da8e66ff06efee4a6e17f6ed1b93"} Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.712214 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg" event={"ID":"fab755a9-f20f-4bc6-a7e2-353396a0ce74","Type":"ContainerStarted","Data":"3313d1f7e2a85614949a25856770fba057fb485288f228d12934b39ced006567"} Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.712370 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-4psvg" Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.713847 4647 generic.go:334] "Generic (PLEG): container finished" podID="6d088a31-8d23-4030-8398-de9b589b301c" containerID="4797314bf2202fb20066781b603ae3df68d88029b67c849754bb4a8e03ac45f3" exitCode=0 Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.713910 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" event={"ID":"6d088a31-8d23-4030-8398-de9b589b301c","Type":"ContainerDied","Data":"4797314bf2202fb20066781b603ae3df68d88029b67c849754bb4a8e03ac45f3"} Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.717381 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vh9b5" event={"ID":"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b","Type":"ContainerStarted","Data":"7aa1ba8f35b2461190700aee42eede6f0ac087db18417972862def6c87397234"} Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.717421 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vh9b5" event={"ID":"a5d1c6f3-2c4f-4046-81fc-c2e210100c4b","Type":"ContainerStarted","Data":"1146f798890787207f6d59afb59d707bd6e2bfae8ef2e07fdaf7ce21352bffc8"} Nov 28 15:41:10 crc kubenswrapper[4647]: I1128 15:41:10.745806 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-4psvg" podStartSLOduration=4.710136575 podStartE2EDuration="42.745786912s" podCreationTimestamp="2025-11-28 15:40:28 +0000 UTC" firstStartedPulling="2025-11-28 15:40:29.642046222 +0000 UTC m=+959.489652643" 
lastFinishedPulling="2025-11-28 15:41:07.677696569 +0000 UTC m=+997.525302980" observedRunningTime="2025-11-28 15:41:10.737891105 +0000 UTC m=+1000.585497536" watchObservedRunningTime="2025-11-28 15:41:10.745786912 +0000 UTC m=+1000.593393343" Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.724976 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerStarted","Data":"216cb0060d00175d92f57504f5d155efb9cc08933244d34a8618e47e828942fb"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.727948 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"de3257de-cbde-4dca-89c7-21af1617cc66","Type":"ContainerStarted","Data":"b565601817554faaea2fc2610e9026f74f53e4f3b2893e400e27044da175de3b"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.729974 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0","Type":"ContainerStarted","Data":"b007f463ead9752bc76839371fce90fbf5de3f8c03969416f6e1844a22b4bee1"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.731755 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" event={"ID":"6d088a31-8d23-4030-8398-de9b589b301c","Type":"ContainerStarted","Data":"df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.732156 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.735993 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerStarted","Data":"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.737404 4647 generic.go:334] "Generic (PLEG): container finished" podID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerID="a8378b066c9ab2244ce3648d056f806b9f720a0591c737a8e755dd51f50fc958" exitCode=0 Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.737648 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" event={"ID":"61f83b6e-cf96-4134-b269-113b8ed46a48","Type":"ContainerDied","Data":"a8378b066c9ab2244ce3648d056f806b9f720a0591c737a8e755dd51f50fc958"} Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.738277 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.738299 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vh9b5" Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.893831 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.322954811 podStartE2EDuration="45.893805798s" podCreationTimestamp="2025-11-28 15:40:26 +0000 UTC" firstStartedPulling="2025-11-28 15:40:29.794740787 +0000 UTC m=+959.642347208" lastFinishedPulling="2025-11-28 15:41:10.365591774 +0000 UTC m=+1000.213198195" observedRunningTime="2025-11-28 15:41:11.824591247 +0000 UTC m=+1001.672197668" watchObservedRunningTime="2025-11-28 15:41:11.893805798 +0000 UTC m=+1001.741412219" Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.898196 4647 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=38.614430349 podStartE2EDuration="41.898180623s" podCreationTimestamp="2025-11-28 15:40:30 +0000 UTC" firstStartedPulling="2025-11-28 15:41:07.500244637 +0000 UTC m=+997.347851058" lastFinishedPulling="2025-11-28 15:41:10.783994911 +0000 UTC m=+1000.631601332" observedRunningTime="2025-11-28 15:41:11.872362337 +0000 UTC m=+1001.719968758" watchObservedRunningTime="2025-11-28 15:41:11.898180623 +0000 UTC m=+1001.745787044"
Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.949912 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-vh9b5" podStartSLOduration=16.099817918 podStartE2EDuration="43.949883695s" podCreationTimestamp="2025-11-28 15:40:28 +0000 UTC" firstStartedPulling="2025-11-28 15:40:30.227957241 +0000 UTC m=+960.075563662" lastFinishedPulling="2025-11-28 15:40:58.078023018 +0000 UTC m=+987.925629439" observedRunningTime="2025-11-28 15:41:11.942530243 +0000 UTC m=+1001.790136664" watchObservedRunningTime="2025-11-28 15:41:11.949883695 +0000 UTC m=+1001.797490116"
Nov 28 15:41:11 crc kubenswrapper[4647]: I1128 15:41:11.982236 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" podStartSLOduration=40.108365855 podStartE2EDuration="41.982208311s" podCreationTimestamp="2025-11-28 15:40:30 +0000 UTC" firstStartedPulling="2025-11-28 15:41:07.486686802 +0000 UTC m=+997.334293223" lastFinishedPulling="2025-11-28 15:41:09.360529258 +0000 UTC m=+999.208135679" observedRunningTime="2025-11-28 15:41:11.976541343 +0000 UTC m=+1001.824147764" watchObservedRunningTime="2025-11-28 15:41:11.982208311 +0000 UTC m=+1001.829814732"
Nov 28 15:41:12 crc kubenswrapper[4647]: I1128 15:41:12.718779 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 28 15:41:12 crc kubenswrapper[4647]: I1128 15:41:12.750142 4647 generic.go:334] "Generic (PLEG): container finished" podID="0f876088-07c2-4cb0-8096-681aaf594d6a" containerID="cc8f8b70325d66439b346ebd106ef921f3ff8d5c3280bb252a0c54dfba7dd205" exitCode=0
Nov 28 15:41:12 crc kubenswrapper[4647]: I1128 15:41:12.750232 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0f876088-07c2-4cb0-8096-681aaf594d6a","Type":"ContainerDied","Data":"cc8f8b70325d66439b346ebd106ef921f3ff8d5c3280bb252a0c54dfba7dd205"}
Nov 28 15:41:12 crc kubenswrapper[4647]: I1128 15:41:12.755279 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" event={"ID":"61f83b6e-cf96-4134-b269-113b8ed46a48","Type":"ContainerStarted","Data":"4b3a7166e825e271cc199b5394da10aa7f79583818264275c976aa56171659b0"}
Nov 28 15:41:12 crc kubenswrapper[4647]: I1128 15:41:12.824101 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" podStartSLOduration=-9223371982.030697 podStartE2EDuration="54.824078138s" podCreationTimestamp="2025-11-28 15:40:18 +0000 UTC" firstStartedPulling="2025-11-28 15:40:19.388532373 +0000 UTC m=+949.236138794" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:12.813696366 +0000 UTC m=+1002.661302787" watchObservedRunningTime="2025-11-28 15:41:12.824078138 +0000 UTC m=+1002.671684559"
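The dnsmasq-dns-57d769cc4f-2lcfv entry just above reports podStartSLOduration=-9223371982.030697: its lastFinishedPulling was never recorded and is the Go zero time (0001-01-01), roughly 2024 years in the past. time.Time.Sub clamps a difference that overflows int64 nanoseconds to the minimum time.Duration, and subtracting that clamped value from the 54.824078138s end-to-end duration then wraps int64 arithmetic, which reproduces the logged number. A minimal sketch of that arithmetic (the exact expression in pod_startup_latency_tracker.go is not reproduced here; the timestamps are taken from the log):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var lastFinishedPulling time.Time // never recorded: zero value, 0001-01-01 00:00:00 UTC
	firstStartedPulling := time.Date(2025, 11, 28, 15, 40, 19, 388532373, time.UTC)

	// ~2024 years in nanoseconds overflows int64, so Sub clamps the
	// result to the minimum time.Duration (math.MinInt64 nanoseconds).
	pull := lastFinishedPulling.Sub(firstStartedPulling)

	// Subtracting the clamped pull window from the 54.824078138s
	// end-to-end duration wraps around in plain int64 arithmetic.
	e2e := 54824078138 * time.Nanosecond
	fmt.Println((e2e - pull).Seconds()) // ≈ -9.22337198203e+09, the podStartSLOduration above
}
```

Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.144644 4647 kubelet.go:2542] "SyncLoop (probe)"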
probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.145578 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.185856 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.495507 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.543960 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.769127 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.769931 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0f876088-07c2-4cb0-8096-681aaf594d6a","Type":"ContainerStarted","Data":"cf0e1b818be23a59721f29139511c14e425151e02b9369f280ec0ec28fa8ad15"} Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.771389 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 15:41:13 crc kubenswrapper[4647]: I1128 15:41:13.801292 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=22.202262831 podStartE2EDuration="53.801266704s" podCreationTimestamp="2025-11-28 15:40:20 +0000 UTC" firstStartedPulling="2025-11-28 15:40:23.344327221 +0000 UTC m=+953.191933642" lastFinishedPulling="2025-11-28 15:40:54.943331084 +0000 UTC m=+984.790937515" observedRunningTime="2025-11-28 15:41:13.799046246 +0000 UTC m=+1003.646652677" watchObservedRunningTime="2025-11-28 15:41:13.801266704 +0000 UTC m=+1003.648873125" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.469544 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"] Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.521735 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.523010 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.604111 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.604182 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62d69\" (UniqueName: \"kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.604209 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.604233 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.605161 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.706338 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.706830 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62d69\" (UniqueName: \"kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.706931 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.707021 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.707305 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.707922 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.708502 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.746466 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62d69\" (UniqueName: \"kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69\") pod \"dnsmasq-dns-8cc7fc4dc-kfldn\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.777704 4647 generic.go:334] "Generic (PLEG): container finished" podID="63a87633-1166-4787-99ee-ec4a5fd02b87" containerID="a37dec4867423b811354db3e62c36120742cf4fbc035f99e2fff02abb669ff9f" exitCode=0 Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.777821 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"63a87633-1166-4787-99ee-ec4a5fd02b87","Type":"ContainerDied","Data":"a37dec4867423b811354db3e62c36120742cf4fbc035f99e2fff02abb669ff9f"} Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.778025 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="dnsmasq-dns" containerID="cri-o://4b3a7166e825e271cc199b5394da10aa7f79583818264275c976aa56171659b0" gracePeriod=10 Nov 28 15:41:14 crc kubenswrapper[4647]: I1128 15:41:14.845181 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.105694 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.129665 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.447588 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.451603 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="dnsmasq-dns" containerID="cri-o://df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a" gracePeriod=10 Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.485807 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.492396 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.494307 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.503079 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.548576 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.548634 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.548670 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbvxl\" (UniqueName: \"kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.548728 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.548803 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: 
\"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: W1128 15:41:15.591070 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6eb2e696_79ba_428d_9a1c_6c0e327ada4e.slice/crio-1182629ea130543b4c6e652c7b4f0fb939afabb6c1ce871a10ef5f8194f8cc71 WatchSource:0}: Error finding container 1182629ea130543b4c6e652c7b4f0fb939afabb6c1ce871a10ef5f8194f8cc71: Status 404 returned error can't find the container with id 1182629ea130543b4c6e652c7b4f0fb939afabb6c1ce871a10ef5f8194f8cc71 Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.596365 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.653298 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.653355 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbvxl\" (UniqueName: \"kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.653387 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.653482 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.653573 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.654493 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.655070 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.656154 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.668937 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.697076 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.733381 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbvxl\" (UniqueName: \"kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl\") pod \"dnsmasq-dns-b8fbc5445-krsc9\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.802062 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.805085 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.806959 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.815219 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.815863 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-t8nr5" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.816148 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.816402 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.816803 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.819152 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-sgk6r" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.819291 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.820586 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.829424 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:15 crc kubenswrapper[4647]: E1128 15:41:15.834351 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d088a31_8d23_4030_8398_de9b589b301c.slice/crio-conmon-df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.847644 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.849637 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"63a87633-1166-4787-99ee-ec4a5fd02b87","Type":"ContainerStarted","Data":"310fd877f9f08dbf2d3262f81b6bb2621f8edc7880833c2e2d3612dc16072652"} Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.857665 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.857791 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.857878 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.857955 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kh6v\" (UniqueName: \"kubernetes.io/projected/f5edb07c-df1f-4434-8608-97841a748dd2-kube-api-access-8kh6v\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.858031 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-config\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.858111 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-scripts\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.858214 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " 
pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.865396 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.929725 4647 generic.go:334] "Generic (PLEG): container finished" podID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerID="4b3a7166e825e271cc199b5394da10aa7f79583818264275c976aa56171659b0" exitCode=0 Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.931796 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" event={"ID":"61f83b6e-cf96-4134-b269-113b8ed46a48","Type":"ContainerDied","Data":"4b3a7166e825e271cc199b5394da10aa7f79583818264275c976aa56171659b0"} Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.953200 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" event={"ID":"6eb2e696-79ba-428d-9a1c-6c0e327ada4e","Type":"ContainerStarted","Data":"1182629ea130543b4c6e652c7b4f0fb939afabb6c1ce871a10ef5f8194f8cc71"} Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959794 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959845 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-cache\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959868 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959895 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959920 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kh6v\" (UniqueName: \"kubernetes.io/projected/f5edb07c-df1f-4434-8608-97841a748dd2-kube-api-access-8kh6v\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959939 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.959976 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-config\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " 
pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-scripts\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960042 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-lock\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960061 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntq9c\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-kube-api-access-ntq9c\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960094 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960134 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.960642 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.961257 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-config\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.961989 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f5edb07c-df1f-4434-8608-97841a748dd2-scripts\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.962961 4647 generic.go:334] "Generic (PLEG): container finished" podID="6d088a31-8d23-4030-8398-de9b589b301c" containerID="df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a" exitCode=0 Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.963276 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" event={"ID":"6d088a31-8d23-4030-8398-de9b589b301c","Type":"ContainerDied","Data":"df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a"} Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.972557 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:15 crc kubenswrapper[4647]: I1128 15:41:15.982705 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:15.998744 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371979.85605 podStartE2EDuration="56.998724488s" podCreationTimestamp="2025-11-28 15:40:19 +0000 UTC" firstStartedPulling="2025-11-28 15:40:22.092206411 +0000 UTC m=+951.939812832" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:15.987150645 +0000 UTC m=+1005.834757066" watchObservedRunningTime="2025-11-28 15:41:15.998724488 +0000 UTC m=+1005.846330909" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:15.999633 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5edb07c-df1f-4434-8608-97841a748dd2-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.029487 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kh6v\" (UniqueName: \"kubernetes.io/projected/f5edb07c-df1f-4434-8608-97841a748dd2-kube-api-access-8kh6v\") pod \"ovn-northd-0\" (UID: \"f5edb07c-df1f-4434-8608-97841a748dd2\") " pod="openstack/ovn-northd-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.061585 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-cache\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.061892 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.062095 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-lock\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.062119 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntq9c\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-kube-api-access-ntq9c\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.062261 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.063332 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.063357 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.063429 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:16.56339458 +0000 UTC m=+1006.411001001 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.065774 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-cache\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.065966 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.066942 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-lock\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.102752 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntq9c\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-kube-api-access-ntq9c\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.103468 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.153130 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.107:5353: connect: connection refused" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.235187 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.264796 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.372150 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc\") pod \"61f83b6e-cf96-4134-b269-113b8ed46a48\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.372497 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config\") pod \"61f83b6e-cf96-4134-b269-113b8ed46a48\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.372691 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snlkb\" (UniqueName: \"kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb\") pod \"61f83b6e-cf96-4134-b269-113b8ed46a48\" (UID: \"61f83b6e-cf96-4134-b269-113b8ed46a48\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.378197 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb" (OuterVolumeSpecName: "kube-api-access-snlkb") pod "61f83b6e-cf96-4134-b269-113b8ed46a48" (UID: "61f83b6e-cf96-4134-b269-113b8ed46a48"). InnerVolumeSpecName "kube-api-access-snlkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.483433 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snlkb\" (UniqueName: \"kubernetes.io/projected/61f83b6e-cf96-4134-b269-113b8ed46a48-kube-api-access-snlkb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.504202 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "61f83b6e-cf96-4134-b269-113b8ed46a48" (UID: "61f83b6e-cf96-4134-b269-113b8ed46a48"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.512135 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config" (OuterVolumeSpecName: "config") pod "61f83b6e-cf96-4134-b269-113b8ed46a48" (UID: "61f83b6e-cf96-4134-b269-113b8ed46a48"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.584234 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.590075 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.594662 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.594690 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61f83b6e-cf96-4134-b269-113b8ed46a48-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.594819 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.594835 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: E1128 15:41:16.594887 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:17.594867964 +0000 UTC m=+1007.442474385 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.683542 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.797265 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb\") pod \"6d088a31-8d23-4030-8398-de9b589b301c\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.797322 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config\") pod \"6d088a31-8d23-4030-8398-de9b589b301c\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.797360 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc\") pod \"6d088a31-8d23-4030-8398-de9b589b301c\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.797431 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d66xf\" (UniqueName: \"kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf\") pod \"6d088a31-8d23-4030-8398-de9b589b301c\" (UID: \"6d088a31-8d23-4030-8398-de9b589b301c\") " Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.813099 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf" (OuterVolumeSpecName: "kube-api-access-d66xf") pod "6d088a31-8d23-4030-8398-de9b589b301c" (UID: "6d088a31-8d23-4030-8398-de9b589b301c"). InnerVolumeSpecName "kube-api-access-d66xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.852155 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d088a31-8d23-4030-8398-de9b589b301c" (UID: "6d088a31-8d23-4030-8398-de9b589b301c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.855134 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config" (OuterVolumeSpecName: "config") pod "6d088a31-8d23-4030-8398-de9b589b301c" (UID: "6d088a31-8d23-4030-8398-de9b589b301c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.865971 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6d088a31-8d23-4030-8398-de9b589b301c" (UID: "6d088a31-8d23-4030-8398-de9b589b301c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.884373 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.899246 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.899612 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.899624 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d088a31-8d23-4030-8398-de9b589b301c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.899634 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d66xf\" (UniqueName: \"kubernetes.io/projected/6d088a31-8d23-4030-8398-de9b589b301c-kube-api-access-d66xf\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.972252 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" event={"ID":"61f83b6e-cf96-4134-b269-113b8ed46a48","Type":"ContainerDied","Data":"c9a8fa09c4c48d68e10611d6fc002d69778f9a32efae49412b9df1e7aa6435fb"} Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.972311 4647 scope.go:117] "RemoveContainer" containerID="4b3a7166e825e271cc199b5394da10aa7f79583818264275c976aa56171659b0" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.972510 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2lcfv" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.976185 4647 generic.go:334] "Generic (PLEG): container finished" podID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerID="f1c18c1ca3eadee1c65ae539c3a1ae39297e8733f3bdb84b6120bba5b2c3d4b3" exitCode=0 Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.976268 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" event={"ID":"6eb2e696-79ba-428d-9a1c-6c0e327ada4e","Type":"ContainerDied","Data":"f1c18c1ca3eadee1c65ae539c3a1ae39297e8733f3bdb84b6120bba5b2c3d4b3"} Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.980523 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f5edb07c-df1f-4434-8608-97841a748dd2","Type":"ContainerStarted","Data":"fac45191eb522ed5287541c06092117990c61840574aa762afdefc7bebcba868"} Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.983869 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" event={"ID":"1de90ce8-1534-4003-b298-49df66ba86f1","Type":"ContainerStarted","Data":"27d466c6a6f733e87ae284b8e1b137caf73d0e0bc405efabf2b7918b251a8d85"} Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.992296 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" Nov 28 15:41:16 crc kubenswrapper[4647]: I1128 15:41:16.993765 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-9wnbc" event={"ID":"6d088a31-8d23-4030-8398-de9b589b301c","Type":"ContainerDied","Data":"59b3e7789d51f128b77956a57371dcade48a72efd398badebe7366fd21513f68"} Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.004272 4647 scope.go:117] "RemoveContainer" containerID="a8378b066c9ab2244ce3648d056f806b9f720a0591c737a8e755dd51f50fc958" Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.085587 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"] Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.093025 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2lcfv"] Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.096643 4647 scope.go:117] "RemoveContainer" containerID="df595d39ad610286bb88b265c91145edaa55c9dcd50596c58c55587fa8be8f0a" Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.106884 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.113036 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-9wnbc"] Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.126826 4647 scope.go:117] "RemoveContainer" containerID="4797314bf2202fb20066781b603ae3df68d88029b67c849754bb4a8e03ac45f3" Nov 28 15:41:17 crc kubenswrapper[4647]: I1128 15:41:17.613599 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:17 crc kubenswrapper[4647]: E1128 15:41:17.613863 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:17 crc kubenswrapper[4647]: E1128 15:41:17.613911 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:41:17 crc kubenswrapper[4647]: E1128 15:41:17.613997 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:19.613970398 +0000 UTC m=+1009.461576819 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.018641 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" event={"ID":"6eb2e696-79ba-428d-9a1c-6c0e327ada4e","Type":"ContainerStarted","Data":"61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c"} Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.019865 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.025091 4647 generic.go:334] "Generic (PLEG): container finished" podID="1de90ce8-1534-4003-b298-49df66ba86f1" containerID="61467da5271e1a0fdd32fbdb0f6df331cf73f80464bce7002140f82a8de6c7e8" exitCode=0 Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.025212 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" event={"ID":"1de90ce8-1534-4003-b298-49df66ba86f1","Type":"ContainerDied","Data":"61467da5271e1a0fdd32fbdb0f6df331cf73f80464bce7002140f82a8de6c7e8"} Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.043665 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" podStartSLOduration=4.04364349 podStartE2EDuration="4.04364349s" podCreationTimestamp="2025-11-28 15:41:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:18.040651632 +0000 UTC m=+1007.888258053" watchObservedRunningTime="2025-11-28 15:41:18.04364349 +0000 UTC m=+1007.891249911" Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.414102 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" path="/var/lib/kubelet/pods/61f83b6e-cf96-4134-b269-113b8ed46a48/volumes" Nov 28 15:41:18 crc kubenswrapper[4647]: I1128 15:41:18.414951 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d088a31-8d23-4030-8398-de9b589b301c" path="/var/lib/kubelet/pods/6d088a31-8d23-4030-8398-de9b589b301c/volumes" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.038089 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" event={"ID":"1de90ce8-1534-4003-b298-49df66ba86f1","Type":"ContainerStarted","Data":"badb58cba2b5c07c01a67494cf365df196ed0be67916469e9b303908a2f267d5"} Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.038204 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.059661 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" podStartSLOduration=4.059643222 podStartE2EDuration="4.059643222s" podCreationTimestamp="2025-11-28 15:41:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:19.055758661 +0000 UTC m=+1008.903365082" watchObservedRunningTime="2025-11-28 15:41:19.059643222 +0000 UTC m=+1008.907249643" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.520784 4647 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/swift-ring-rebalance-bxd4t"] Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.538356 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.557305 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.565578 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.565765 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.565894 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.565963 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.566040 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.566102 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.566988 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="61f83b6e-cf96-4134-b269-113b8ed46a48" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.582232 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d088a31-8d23-4030-8398-de9b589b301c" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.586606 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bxd4t"] Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.586776 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.594359 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.611258 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-bxd4t"] Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.619020 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-tx8p5 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-tx8p5 ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-bxd4t" podUID="3fac4587-85ac-437a-bdd1-fcb63c0f1230" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.625303 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.625653 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.633065 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-dj6b9"] Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.635682 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.638699 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dj6b9"] Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.649864 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.650123 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.650146 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:41:19 crc kubenswrapper[4647]: E1128 15:41:19.650193 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:23.650178643 +0000 UTC m=+1013.497785064 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.751996 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx8p5\" (UniqueName: \"kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752044 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752071 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752126 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752261 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752373 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752519 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752580 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752775 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752841 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.752940 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b79dl\" (UniqueName: \"kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.753002 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.753063 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.753104 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854205 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx8p5\" (UniqueName: \"kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854256 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854279 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854329 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854348 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854381 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854435 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854462 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854495 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854537 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b79dl\" (UniqueName: \"kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854559 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854574 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: 
\"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.854587 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.855487 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.856733 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.857554 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.858349 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.858999 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.861990 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.862961 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.863338 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " 
pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.866121 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.867682 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.872747 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.882848 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.892707 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx8p5\" (UniqueName: \"kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5\") pod \"swift-ring-rebalance-bxd4t\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.894127 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b79dl\" (UniqueName: \"kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl\") pod \"swift-ring-rebalance-dj6b9\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") " pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:19 crc kubenswrapper[4647]: I1128 15:41:19.949345 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dj6b9" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.045145 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f5edb07c-df1f-4434-8608-97841a748dd2","Type":"ContainerStarted","Data":"fb519c19af37118bf7d26abc7b21614bb1a719805043e0e93c4f47fb9e2071f2"} Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.046063 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.055356 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162083 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162432 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162500 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx8p5\" (UniqueName: \"kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162568 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162617 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162642 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.162671 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices\") pod \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\" (UID: \"3fac4587-85ac-437a-bdd1-fcb63c0f1230\") " Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.163827 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.164750 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.165291 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts" (OuterVolumeSpecName: "scripts") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.167359 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.169559 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.170231 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5" (OuterVolumeSpecName: "kube-api-access-tx8p5") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "kube-api-access-tx8p5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.170525 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "3fac4587-85ac-437a-bdd1-fcb63c0f1230" (UID: "3fac4587-85ac-437a-bdd1-fcb63c0f1230"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265345 4647 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3fac4587-85ac-437a-bdd1-fcb63c0f1230-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265382 4647 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265392 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx8p5\" (UniqueName: \"kubernetes.io/projected/3fac4587-85ac-437a-bdd1-fcb63c0f1230-kube-api-access-tx8p5\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265402 4647 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265426 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265435 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fac4587-85ac-437a-bdd1-fcb63c0f1230-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.265446 4647 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3fac4587-85ac-437a-bdd1-fcb63c0f1230-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:20 crc kubenswrapper[4647]: W1128 15:41:20.436011 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb098c625_f531_4a3a_8532_fbfc7cd4f236.slice/crio-780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978 WatchSource:0}: Error finding container 780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978: Status 404 returned error can't find the container with id 780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978 Nov 28 15:41:20 crc kubenswrapper[4647]: I1128 15:41:20.442936 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dj6b9"] Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.052549 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dj6b9" event={"ID":"b098c625-f531-4a3a-8532-fbfc7cd4f236","Type":"ContainerStarted","Data":"780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978"} Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.055348 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bxd4t" Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.056762 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f5edb07c-df1f-4434-8608-97841a748dd2","Type":"ContainerStarted","Data":"09713354e087d7e6e9f97152500467dffa0f9d6c8d363545533765fa72d01399"} Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.056793 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.095476 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=4.206290488 podStartE2EDuration="6.095455926s" podCreationTimestamp="2025-11-28 15:41:15 +0000 UTC" firstStartedPulling="2025-11-28 15:41:16.894175985 +0000 UTC m=+1006.741782406" lastFinishedPulling="2025-11-28 15:41:18.783341423 +0000 UTC m=+1008.630947844" observedRunningTime="2025-11-28 15:41:21.087825786 +0000 UTC m=+1010.935432217" watchObservedRunningTime="2025-11-28 15:41:21.095455926 +0000 UTC m=+1010.943062347" Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.130477 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-bxd4t"] Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.135105 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-bxd4t"] Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.178349 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 15:41:21 crc kubenswrapper[4647]: I1128 15:41:21.178465 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.156475 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-zql68" event={"ID":"083c881c-8e40-4d03-b4f1-91af7bcd2cd1","Type":"ContainerStarted","Data":"0eb12d876d4d2da6bd86214b6b4b1b549bbe5f4011876f4a52811e3bbd1c1e23"} Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.201901 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-zql68" podStartSLOduration=-9223371984.652895 podStartE2EDuration="52.201881425s" podCreationTimestamp="2025-11-28 15:40:30 +0000 UTC" firstStartedPulling="2025-11-28 15:40:37.005230449 +0000 UTC m=+966.852836870" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:22.187976071 +0000 UTC m=+1012.035582502" watchObservedRunningTime="2025-11-28 15:41:22.201881425 +0000 UTC m=+1012.049487846" Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.325082 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.325149 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.405146 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fac4587-85ac-437a-bdd1-fcb63c0f1230" path="/var/lib/kubelet/pods/3fac4587-85ac-437a-bdd1-fcb63c0f1230/volumes" Nov 28 15:41:22 crc kubenswrapper[4647]: I1128 15:41:22.426401 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 15:41:23 crc kubenswrapper[4647]: I1128 
15:41:23.355815 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 15:41:23 crc kubenswrapper[4647]: I1128 15:41:23.664687 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:23 crc kubenswrapper[4647]: E1128 15:41:23.664881 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:23 crc kubenswrapper[4647]: E1128 15:41:23.664910 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:41:23 crc kubenswrapper[4647]: E1128 15:41:23.664988 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:31.664964714 +0000 UTC m=+1021.512571135 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found Nov 28 15:41:24 crc kubenswrapper[4647]: I1128 15:41:24.847641 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:25 crc kubenswrapper[4647]: I1128 15:41:25.713516 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 15:41:25 crc kubenswrapper[4647]: I1128 15:41:25.772454 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 15:41:25 crc kubenswrapper[4647]: I1128 15:41:25.831684 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:41:25 crc kubenswrapper[4647]: I1128 15:41:25.910141 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:25 crc kubenswrapper[4647]: I1128 15:41:25.910393 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="dnsmasq-dns" containerID="cri-o://61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c" gracePeriod=10 Nov 28 15:41:26 crc kubenswrapper[4647]: E1128 15:41:26.174857 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6eb2e696_79ba_428d_9a1c_6c0e327ada4e.slice/crio-61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:41:26 crc kubenswrapper[4647]: I1128 15:41:26.200698 4647 generic.go:334] "Generic (PLEG): container finished" podID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerID="61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c" exitCode=0 Nov 28 15:41:26 crc kubenswrapper[4647]: I1128 15:41:26.200792 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" event={"ID":"6eb2e696-79ba-428d-9a1c-6c0e327ada4e","Type":"ContainerDied","Data":"61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c"} Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.209775 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dj6b9" event={"ID":"b098c625-f531-4a3a-8532-fbfc7cd4f236","Type":"ContainerStarted","Data":"924d6923f917f0bbadc86ceb456dbab89a51e4a8262d58d90edca246c58dc70d"} Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.213208 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c","Type":"ContainerStarted","Data":"2e75bde85ba51c8394b8568315f7a75ca517e7cb45987fe3d3a8fdd5fecd4f31"} Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.213648 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.252378 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-dj6b9" podStartSLOduration=2.908469798 podStartE2EDuration="8.252360264s" podCreationTimestamp="2025-11-28 15:41:19 +0000 UTC" firstStartedPulling="2025-11-28 15:41:20.438743334 +0000 UTC m=+1010.286349765" lastFinishedPulling="2025-11-28 15:41:25.78263381 +0000 UTC m=+1015.630240231" observedRunningTime="2025-11-28 15:41:27.232523205 +0000 UTC m=+1017.080129626" watchObservedRunningTime="2025-11-28 15:41:27.252360264 +0000 UTC m=+1017.099966685" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.264457 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.576528066 podStartE2EDuration="1m4.264438759s" podCreationTimestamp="2025-11-28 15:40:23 +0000 UTC" firstStartedPulling="2025-11-28 15:40:25.097539971 +0000 UTC m=+954.945146392" lastFinishedPulling="2025-11-28 15:41:25.785450654 +0000 UTC m=+1015.633057085" observedRunningTime="2025-11-28 15:41:27.249593841 +0000 UTC m=+1017.097200272" watchObservedRunningTime="2025-11-28 15:41:27.264438759 +0000 UTC m=+1017.112045180" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.297602 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.468398 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62d69\" (UniqueName: \"kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69\") pod \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.468482 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc\") pod \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.468553 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb\") pod \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.468572 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config\") pod \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\" (UID: \"6eb2e696-79ba-428d-9a1c-6c0e327ada4e\") " Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.507730 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69" (OuterVolumeSpecName: "kube-api-access-62d69") pod "6eb2e696-79ba-428d-9a1c-6c0e327ada4e" (UID: "6eb2e696-79ba-428d-9a1c-6c0e327ada4e"). InnerVolumeSpecName "kube-api-access-62d69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.577739 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62d69\" (UniqueName: \"kubernetes.io/projected/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-kube-api-access-62d69\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.656753 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6eb2e696-79ba-428d-9a1c-6c0e327ada4e" (UID: "6eb2e696-79ba-428d-9a1c-6c0e327ada4e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.681184 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config" (OuterVolumeSpecName: "config") pod "6eb2e696-79ba-428d-9a1c-6c0e327ada4e" (UID: "6eb2e696-79ba-428d-9a1c-6c0e327ada4e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.684093 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.684405 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.701135 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6eb2e696-79ba-428d-9a1c-6c0e327ada4e" (UID: "6eb2e696-79ba-428d-9a1c-6c0e327ada4e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:27 crc kubenswrapper[4647]: I1128 15:41:27.785961 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb2e696-79ba-428d-9a1c-6c0e327ada4e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.230027 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" event={"ID":"6eb2e696-79ba-428d-9a1c-6c0e327ada4e","Type":"ContainerDied","Data":"1182629ea130543b4c6e652c7b4f0fb939afabb6c1ce871a10ef5f8194f8cc71"} Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.230103 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-kfldn" Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.230139 4647 scope.go:117] "RemoveContainer" containerID="61f2e99f23cb5de1e9381be229cc727b52b9faa1974345a2d3e6fe76142ccd4c" Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.286746 4647 scope.go:117] "RemoveContainer" containerID="f1c18c1ca3eadee1c65ae539c3a1ae39297e8733f3bdb84b6120bba5b2c3d4b3" Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.309544 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.317461 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-kfldn"] Nov 28 15:41:28 crc kubenswrapper[4647]: I1128 15:41:28.406782 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" path="/var/lib/kubelet/pods/6eb2e696-79ba-428d-9a1c-6c0e327ada4e/volumes" Nov 28 15:41:31 crc kubenswrapper[4647]: I1128 15:41:31.305153 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 15:41:31 crc kubenswrapper[4647]: I1128 15:41:31.764234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0" Nov 28 15:41:31 crc kubenswrapper[4647]: E1128 15:41:31.764488 4647 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:41:31 crc kubenswrapper[4647]: E1128 15:41:31.764508 4647 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: 
configmap "swift-ring-files" not found
Nov 28 15:41:31 crc kubenswrapper[4647]: E1128 15:41:31.764579 4647 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift podName:ab7ffd14-cb79-40b8-854d-1dd1deca75f2 nodeName:}" failed. No retries permitted until 2025-11-28 15:41:47.764559708 +0000 UTC m=+1037.612166129 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift") pod "swift-storage-0" (UID: "ab7ffd14-cb79-40b8-854d-1dd1deca75f2") : configmap "swift-ring-files" not found
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.372340 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-vd5k5"]
Nov 28 15:41:32 crc kubenswrapper[4647]: E1128 15:41:32.373943 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="init"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.374081 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="init"
Nov 28 15:41:32 crc kubenswrapper[4647]: E1128 15:41:32.374186 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="dnsmasq-dns"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.374270 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="dnsmasq-dns"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.374624 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb2e696-79ba-428d-9a1c-6c0e327ada4e" containerName="dnsmasq-dns"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.375505 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.411540 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vd5k5"]
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.478101 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dspfk\" (UniqueName: \"kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk\") pod \"keystone-db-create-vd5k5\" (UID: \"5325b221-c839-4627-ade7-88b5887633c1\") " pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.580219 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dspfk\" (UniqueName: \"kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk\") pod \"keystone-db-create-vd5k5\" (UID: \"5325b221-c839-4627-ade7-88b5887633c1\") " pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.602378 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dspfk\" (UniqueName: \"kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk\") pod \"keystone-db-create-vd5k5\" (UID: \"5325b221-c839-4627-ade7-88b5887633c1\") " pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.658398 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-pj6vk"]
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.659504 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.674800 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pj6vk"]
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.703305 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.784344 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knkfq\" (UniqueName: \"kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq\") pod \"placement-db-create-pj6vk\" (UID: \"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac\") " pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.888066 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knkfq\" (UniqueName: \"kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq\") pod \"placement-db-create-pj6vk\" (UID: \"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac\") " pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.913174 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knkfq\" (UniqueName: \"kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq\") pod \"placement-db-create-pj6vk\" (UID: \"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac\") " pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.979086 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.987684 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-sq882"]
Nov 28 15:41:32 crc kubenswrapper[4647]: I1128 15:41:32.989014 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sq882"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.013240 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sq882"]
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.044900 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vd5k5"]
Nov 28 15:41:33 crc kubenswrapper[4647]: W1128 15:41:33.045582 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5325b221_c839_4627_ade7_88b5887633c1.slice/crio-e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83 WatchSource:0}: Error finding container e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83: Status 404 returned error can't find the container with id e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.091825 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8dqp\" (UniqueName: \"kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp\") pod \"glance-db-create-sq882\" (UID: \"b3a08067-d4b3-4e12-a424-b849589289c9\") " pod="openstack/glance-db-create-sq882"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.194234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8dqp\" (UniqueName: \"kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp\") pod \"glance-db-create-sq882\" (UID: \"b3a08067-d4b3-4e12-a424-b849589289c9\") " pod="openstack/glance-db-create-sq882"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.229059 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8dqp\" (UniqueName: \"kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp\") pod \"glance-db-create-sq882\" (UID: \"b3a08067-d4b3-4e12-a424-b849589289c9\") " pod="openstack/glance-db-create-sq882"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.274767 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vd5k5" event={"ID":"5325b221-c839-4627-ade7-88b5887633c1","Type":"ContainerStarted","Data":"3aabf00034f8c42bcc1436c3ea2d289a5955414228a5cffed2dcb0cfbf271987"}
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.274812 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vd5k5" event={"ID":"5325b221-c839-4627-ade7-88b5887633c1","Type":"ContainerStarted","Data":"e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83"}
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.297483 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-vd5k5" podStartSLOduration=1.297400343 podStartE2EDuration="1.297400343s" podCreationTimestamp="2025-11-28 15:41:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:33.292456204 +0000 UTC m=+1023.140062635" watchObservedRunningTime="2025-11-28 15:41:33.297400343 +0000 UTC m=+1023.145006764"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.349222 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sq882"
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.474129 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pj6vk"]
Nov 28 15:41:33 crc kubenswrapper[4647]: I1128 15:41:33.854611 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sq882"]
Nov 28 15:41:33 crc kubenswrapper[4647]: W1128 15:41:33.861544 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3a08067_d4b3_4e12_a424_b849589289c9.slice/crio-ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a WatchSource:0}: Error finding container ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a: Status 404 returned error can't find the container with id ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.283093 4647 generic.go:334] "Generic (PLEG): container finished" podID="5325b221-c839-4627-ade7-88b5887633c1" containerID="3aabf00034f8c42bcc1436c3ea2d289a5955414228a5cffed2dcb0cfbf271987" exitCode=0
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.283240 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vd5k5" event={"ID":"5325b221-c839-4627-ade7-88b5887633c1","Type":"ContainerDied","Data":"3aabf00034f8c42bcc1436c3ea2d289a5955414228a5cffed2dcb0cfbf271987"}
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.285262 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sq882" event={"ID":"b3a08067-d4b3-4e12-a424-b849589289c9","Type":"ContainerStarted","Data":"93f224bde4c7f68df896c358b5bc36a3b9fb404a8e2cadbe1cc7018f514d72b7"}
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.285297 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sq882" event={"ID":"b3a08067-d4b3-4e12-a424-b849589289c9","Type":"ContainerStarted","Data":"ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a"}
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.286687 4647 generic.go:334] "Generic (PLEG): container finished" podID="6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" containerID="897e2bda119a05a250874d26620442a69065fe15d09e78a1538848bd8db7696b" exitCode=0
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.286721 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pj6vk" event={"ID":"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac","Type":"ContainerDied","Data":"897e2bda119a05a250874d26620442a69065fe15d09e78a1538848bd8db7696b"}
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.286738 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pj6vk" event={"ID":"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac","Type":"ContainerStarted","Data":"9d7e8a63acbd7b74093108b3996175f3ebd983ecc35fed14a35af4cf28b5873b"}
Nov 28 15:41:34 crc kubenswrapper[4647]: I1128 15:41:34.322149 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.294604 4647 generic.go:334] "Generic (PLEG): container finished" podID="b098c625-f531-4a3a-8532-fbfc7cd4f236" containerID="924d6923f917f0bbadc86ceb456dbab89a51e4a8262d58d90edca246c58dc70d" exitCode=0
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.294688 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dj6b9" event={"ID":"b098c625-f531-4a3a-8532-fbfc7cd4f236","Type":"ContainerDied","Data":"924d6923f917f0bbadc86ceb456dbab89a51e4a8262d58d90edca246c58dc70d"}
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.296895 4647 generic.go:334] "Generic (PLEG): container finished" podID="b3a08067-d4b3-4e12-a424-b849589289c9" containerID="93f224bde4c7f68df896c358b5bc36a3b9fb404a8e2cadbe1cc7018f514d72b7" exitCode=0
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.296959 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sq882" event={"ID":"b3a08067-d4b3-4e12-a424-b849589289c9","Type":"ContainerDied","Data":"93f224bde4c7f68df896c358b5bc36a3b9fb404a8e2cadbe1cc7018f514d72b7"}
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.662268 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.767264 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knkfq\" (UniqueName: \"kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq\") pod \"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac\" (UID: \"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac\") "
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.776656 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq" (OuterVolumeSpecName: "kube-api-access-knkfq") pod "6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" (UID: "6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac"). InnerVolumeSpecName "kube-api-access-knkfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.813857 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.869258 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knkfq\" (UniqueName: \"kubernetes.io/projected/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac-kube-api-access-knkfq\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.904267 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sq882"
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.970403 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dspfk\" (UniqueName: \"kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk\") pod \"5325b221-c839-4627-ade7-88b5887633c1\" (UID: \"5325b221-c839-4627-ade7-88b5887633c1\") "
Nov 28 15:41:35 crc kubenswrapper[4647]: I1128 15:41:35.983006 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk" (OuterVolumeSpecName: "kube-api-access-dspfk") pod "5325b221-c839-4627-ade7-88b5887633c1" (UID: "5325b221-c839-4627-ade7-88b5887633c1"). InnerVolumeSpecName "kube-api-access-dspfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.072256 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8dqp\" (UniqueName: \"kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp\") pod \"b3a08067-d4b3-4e12-a424-b849589289c9\" (UID: \"b3a08067-d4b3-4e12-a424-b849589289c9\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.073207 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dspfk\" (UniqueName: \"kubernetes.io/projected/5325b221-c839-4627-ade7-88b5887633c1-kube-api-access-dspfk\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.078616 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp" (OuterVolumeSpecName: "kube-api-access-r8dqp") pod "b3a08067-d4b3-4e12-a424-b849589289c9" (UID: "b3a08067-d4b3-4e12-a424-b849589289c9"). InnerVolumeSpecName "kube-api-access-r8dqp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.175020 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8dqp\" (UniqueName: \"kubernetes.io/projected/b3a08067-d4b3-4e12-a424-b849589289c9-kube-api-access-r8dqp\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.304142 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vd5k5"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.304142 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vd5k5" event={"ID":"5325b221-c839-4627-ade7-88b5887633c1","Type":"ContainerDied","Data":"e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83"}
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.304254 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e24342ac1079a2dfabe4a0a4778f955a7b3ee1adb1f689a6a0ab7363ddbf4d83"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.314142 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sq882" event={"ID":"b3a08067-d4b3-4e12-a424-b849589289c9","Type":"ContainerDied","Data":"ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a"}
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.314171 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sq882"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.314175 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee7e087c71ba543866ea38ce7fd4b012aa6ce4d2d8e7b4fc90238aa0828d609a"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.316711 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pj6vk"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.316737 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pj6vk" event={"ID":"6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac","Type":"ContainerDied","Data":"9d7e8a63acbd7b74093108b3996175f3ebd983ecc35fed14a35af4cf28b5873b"}
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.316751 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d7e8a63acbd7b74093108b3996175f3ebd983ecc35fed14a35af4cf28b5873b"
Nov 28 15:41:36 crc kubenswrapper[4647]: E1128 15:41:36.431448 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a2f1ed3_7cb6_449c_80b5_2e0c0ff874ac.slice/crio-9d7e8a63acbd7b74093108b3996175f3ebd983ecc35fed14a35af4cf28b5873b\": RecentStats: unable to find data in memory cache]"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.647807 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dj6b9"
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803678 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803746 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b79dl\" (UniqueName: \"kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803779 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803888 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803946 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.803997 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.804021 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf\") pod \"b098c625-f531-4a3a-8532-fbfc7cd4f236\" (UID: \"b098c625-f531-4a3a-8532-fbfc7cd4f236\") "
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.805191 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.805886 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.815363 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl" (OuterVolumeSpecName: "kube-api-access-b79dl") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "kube-api-access-b79dl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.826932 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.841266 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts" (OuterVolumeSpecName: "scripts") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.845339 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.854611 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b098c625-f531-4a3a-8532-fbfc7cd4f236" (UID: "b098c625-f531-4a3a-8532-fbfc7cd4f236"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906473 4647 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b098c625-f531-4a3a-8532-fbfc7cd4f236-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906503 4647 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906525 4647 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906534 4647 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906542 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b098c625-f531-4a3a-8532-fbfc7cd4f236-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906550 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b79dl\" (UniqueName: \"kubernetes.io/projected/b098c625-f531-4a3a-8532-fbfc7cd4f236-kube-api-access-b79dl\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:36 crc kubenswrapper[4647]: I1128 15:41:36.906558 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b098c625-f531-4a3a-8532-fbfc7cd4f236-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:37 crc kubenswrapper[4647]: I1128 15:41:37.330668 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dj6b9" event={"ID":"b098c625-f531-4a3a-8532-fbfc7cd4f236","Type":"ContainerDied","Data":"780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978"}
Nov 28 15:41:37 crc kubenswrapper[4647]: I1128 15:41:37.331209 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="780a0d2fd93a977a293ab58f8e894ea4f357090c685f325242007ba515320978"
Nov 28 15:41:37 crc kubenswrapper[4647]: I1128 15:41:37.330804 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dj6b9"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414289 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b08b-account-create-hqtp6"]
Nov 28 15:41:42 crc kubenswrapper[4647]: E1128 15:41:42.414643 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414655 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: E1128 15:41:42.414669 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b098c625-f531-4a3a-8532-fbfc7cd4f236" containerName="swift-ring-rebalance"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414675 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b098c625-f531-4a3a-8532-fbfc7cd4f236" containerName="swift-ring-rebalance"
Nov 28 15:41:42 crc kubenswrapper[4647]: E1128 15:41:42.414709 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5325b221-c839-4627-ade7-88b5887633c1" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414715 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5325b221-c839-4627-ade7-88b5887633c1" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: E1128 15:41:42.414723 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a08067-d4b3-4e12-a424-b849589289c9" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414731 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a08067-d4b3-4e12-a424-b849589289c9" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414888 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3a08067-d4b3-4e12-a424-b849589289c9" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414899 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="5325b221-c839-4627-ade7-88b5887633c1" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414910 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b098c625-f531-4a3a-8532-fbfc7cd4f236" containerName="swift-ring-rebalance"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.414918 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" containerName="mariadb-database-create"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.415520 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.418303 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.428626 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b08b-account-create-hqtp6"]
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.529231 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl6lq\" (UniqueName: \"kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq\") pod \"keystone-b08b-account-create-hqtp6\" (UID: \"35e79807-4ef8-4edc-ac3c-7d04a71cc82e\") " pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.631525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl6lq\" (UniqueName: \"kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq\") pod \"keystone-b08b-account-create-hqtp6\" (UID: \"35e79807-4ef8-4edc-ac3c-7d04a71cc82e\") " pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.668096 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl6lq\" (UniqueName: \"kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq\") pod \"keystone-b08b-account-create-hqtp6\" (UID: \"35e79807-4ef8-4edc-ac3c-7d04a71cc82e\") " pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.740273 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-0c68-account-create-pkl88"]
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.740831 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.741772 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.745252 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.759086 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0c68-account-create-pkl88"]
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.836495 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx2pt\" (UniqueName: \"kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt\") pod \"placement-0c68-account-create-pkl88\" (UID: \"90e3a33a-b397-4644-9f1b-89677ee76424\") " pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.938681 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx2pt\" (UniqueName: \"kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt\") pod \"placement-0c68-account-create-pkl88\" (UID: \"90e3a33a-b397-4644-9f1b-89677ee76424\") " pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:42 crc kubenswrapper[4647]: I1128 15:41:42.972162 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx2pt\" (UniqueName: \"kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt\") pod \"placement-0c68-account-create-pkl88\" (UID: \"90e3a33a-b397-4644-9f1b-89677ee76424\") " pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.064340 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.111490 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-84aa-account-create-g9ndj"]
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.115686 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.118722 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.133434 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-84aa-account-create-g9ndj"]
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.142662 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvnfn\" (UniqueName: \"kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn\") pod \"glance-84aa-account-create-g9ndj\" (UID: \"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6\") " pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.213806 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b08b-account-create-hqtp6"]
Nov 28 15:41:43 crc kubenswrapper[4647]: W1128 15:41:43.224059 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35e79807_4ef8_4edc_ac3c_7d04a71cc82e.slice/crio-39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987 WatchSource:0}: Error finding container 39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987: Status 404 returned error can't find the container with id 39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.245084 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvnfn\" (UniqueName: \"kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn\") pod \"glance-84aa-account-create-g9ndj\" (UID: \"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6\") " pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.269024 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvnfn\" (UniqueName: \"kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn\") pod \"glance-84aa-account-create-g9ndj\" (UID: \"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6\") " pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.381212 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b08b-account-create-hqtp6" event={"ID":"35e79807-4ef8-4edc-ac3c-7d04a71cc82e","Type":"ContainerStarted","Data":"39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987"}
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.442646 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.515550 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-4psvg" podUID="fab755a9-f20f-4bc6-a7e2-353396a0ce74" containerName="ovn-controller" probeResult="failure" output=<
Nov 28 15:41:43 crc kubenswrapper[4647]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 28 15:41:43 crc kubenswrapper[4647]: >
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.561500 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0c68-account-create-pkl88"]
Nov 28 15:41:43 crc kubenswrapper[4647]: W1128 15:41:43.570557 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90e3a33a_b397_4644_9f1b_89677ee76424.slice/crio-31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e WatchSource:0}: Error finding container 31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e: Status 404 returned error can't find the container with id 31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.702519 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vh9b5"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.719211 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vh9b5"
Nov 28 15:41:43 crc kubenswrapper[4647]: W1128 15:41:43.935923 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda40f21f8_fd75_4ac5_826a_4d7ae9ae14b6.slice/crio-402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754 WatchSource:0}: Error finding container 402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754: Status 404 returned error can't find the container with id 402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.962964 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-84aa-account-create-g9ndj"]
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.984676 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-4psvg-config-ss5b8"]
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.988231 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:43 crc kubenswrapper[4647]: I1128 15:41:43.998460 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.036326 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg-config-ss5b8"]
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.088988 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.089043 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgkq6\" (UniqueName: \"kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.089075 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.089105 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.089306 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.089604 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191452 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgkq6\" (UniqueName: \"kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191481 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191505 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191557 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191878 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191974 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.191991 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.192060 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.192268 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.193545 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.218141 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgkq6\" (UniqueName: \"kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6\") pod \"ovn-controller-4psvg-config-ss5b8\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") " pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.369058 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.389681 4647 generic.go:334] "Generic (PLEG): container finished" podID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerID="c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914" exitCode=0
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.389749 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerDied","Data":"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.391813 4647 generic.go:334] "Generic (PLEG): container finished" podID="90e3a33a-b397-4644-9f1b-89677ee76424" containerID="ae0b1117beabadbba219d7813b2dcfd1d39468b97d7554983595089271c05d0e" exitCode=0
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.391967 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0c68-account-create-pkl88" event={"ID":"90e3a33a-b397-4644-9f1b-89677ee76424","Type":"ContainerDied","Data":"ae0b1117beabadbba219d7813b2dcfd1d39468b97d7554983595089271c05d0e"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.392009 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0c68-account-create-pkl88" event={"ID":"90e3a33a-b397-4644-9f1b-89677ee76424","Type":"ContainerStarted","Data":"31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.397654 4647 generic.go:334] "Generic (PLEG): container finished" podID="49c7e330-cae6-469f-9a44-7087cc112af1" containerID="216cb0060d00175d92f57504f5d155efb9cc08933244d34a8618e47e828942fb" exitCode=0
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.400571 4647 generic.go:334] "Generic (PLEG): container finished" podID="35e79807-4ef8-4edc-ac3c-7d04a71cc82e" containerID="aa38034078bf7864703a09170c277cc314671f27200cd0251cbcde4ab050e633" exitCode=0
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.410518 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerDied","Data":"216cb0060d00175d92f57504f5d155efb9cc08933244d34a8618e47e828942fb"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.410593 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b08b-account-create-hqtp6" event={"ID":"35e79807-4ef8-4edc-ac3c-7d04a71cc82e","Type":"ContainerDied","Data":"aa38034078bf7864703a09170c277cc314671f27200cd0251cbcde4ab050e633"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.410614 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-84aa-account-create-g9ndj" event={"ID":"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6","Type":"ContainerStarted","Data":"b4b033dd682e1f836613580453c6017208b1e48d08782648666fdbb1ba5af303"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.410628 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-84aa-account-create-g9ndj" event={"ID":"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6","Type":"ContainerStarted","Data":"402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754"}
Nov 28 15:41:44 crc kubenswrapper[4647]: I1128 15:41:44.911727 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg-config-ss5b8"]
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.415442 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-ss5b8" event={"ID":"93d0a4f3-4560-4765-9d9c-fbf7688671e6","Type":"ContainerStarted","Data":"bda0e7a2501d0632e4126d44012a72ccc6210bdc823dd29bf4dbd6deaaa1b302"}
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.415801 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-ss5b8" event={"ID":"93d0a4f3-4560-4765-9d9c-fbf7688671e6","Type":"ContainerStarted","Data":"422cd5de5be5b111321d7e494fc35a27b6c0682a98a169a4d5cb0307be109f44"}
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.417550 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerStarted","Data":"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102"}
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.418319 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.420477 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerStarted","Data":"f8fd3dd1f90ff0fbd13ce6172d2787a11e63ac207094ffff5755cd5628d899c9"}
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.420691 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.422137 4647 generic.go:334] "Generic (PLEG): container finished" podID="a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" containerID="b4b033dd682e1f836613580453c6017208b1e48d08782648666fdbb1ba5af303" exitCode=0
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.422363 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-84aa-account-create-g9ndj" event={"ID":"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6","Type":"ContainerDied","Data":"b4b033dd682e1f836613580453c6017208b1e48d08782648666fdbb1ba5af303"}
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.478580 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.680963361 podStartE2EDuration="1m27.478546126s" podCreationTimestamp="2025-11-28 15:40:18 +0000 UTC" firstStartedPulling="2025-11-28 15:40:20.249470008 +0000 UTC m=+950.097076429" lastFinishedPulling="2025-11-28 15:41:08.047052743 +0000 UTC m=+997.894659194" observedRunningTime="2025-11-28 15:41:45.473785881 +0000 UTC m=+1035.321392302" watchObservedRunningTime="2025-11-28 15:41:45.478546126 +0000 UTC m=+1035.326152577"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.482103 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-4psvg-config-ss5b8" podStartSLOduration=2.482091258 podStartE2EDuration="2.482091258s" podCreationTimestamp="2025-11-28 15:41:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:45.451739344 +0000 UTC m=+1035.299345775" watchObservedRunningTime="2025-11-28 15:41:45.482091258 +0000 UTC m=+1035.329697679"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.503439 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.958527373 podStartE2EDuration="1m27.503400396s" podCreationTimestamp="2025-11-28 15:40:18 +0000 UTC" firstStartedPulling="2025-11-28 15:40:20.681009009 +0000 UTC m=+950.528615430" lastFinishedPulling="2025-11-28 15:41:08.225882032 +0000 UTC m=+998.073488453" observedRunningTime="2025-11-28 15:41:45.495331825 +0000 UTC m=+1035.342938246" watchObservedRunningTime="2025-11-28 15:41:45.503400396 +0000 UTC m=+1035.351006817"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.775295 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.867007 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.922184 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kx2pt\" (UniqueName: \"kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt\") pod \"90e3a33a-b397-4644-9f1b-89677ee76424\" (UID: \"90e3a33a-b397-4644-9f1b-89677ee76424\") "
Nov 28 15:41:45 crc kubenswrapper[4647]: I1128 15:41:45.932619 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt" (OuterVolumeSpecName: "kube-api-access-kx2pt") pod "90e3a33a-b397-4644-9f1b-89677ee76424" (UID: "90e3a33a-b397-4644-9f1b-89677ee76424"). InnerVolumeSpecName "kube-api-access-kx2pt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.024388 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tl6lq\" (UniqueName: \"kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq\") pod \"35e79807-4ef8-4edc-ac3c-7d04a71cc82e\" (UID: \"35e79807-4ef8-4edc-ac3c-7d04a71cc82e\") "
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.025077 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kx2pt\" (UniqueName: \"kubernetes.io/projected/90e3a33a-b397-4644-9f1b-89677ee76424-kube-api-access-kx2pt\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.029280 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq" (OuterVolumeSpecName: "kube-api-access-tl6lq") pod "35e79807-4ef8-4edc-ac3c-7d04a71cc82e" (UID: "35e79807-4ef8-4edc-ac3c-7d04a71cc82e"). InnerVolumeSpecName "kube-api-access-tl6lq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.125691 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tl6lq\" (UniqueName: \"kubernetes.io/projected/35e79807-4ef8-4edc-ac3c-7d04a71cc82e-kube-api-access-tl6lq\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.441998 4647 generic.go:334] "Generic (PLEG): container finished" podID="93d0a4f3-4560-4765-9d9c-fbf7688671e6" containerID="bda0e7a2501d0632e4126d44012a72ccc6210bdc823dd29bf4dbd6deaaa1b302" exitCode=0
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.442076 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-ss5b8" event={"ID":"93d0a4f3-4560-4765-9d9c-fbf7688671e6","Type":"ContainerDied","Data":"bda0e7a2501d0632e4126d44012a72ccc6210bdc823dd29bf4dbd6deaaa1b302"}
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.446716 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0c68-account-create-pkl88" event={"ID":"90e3a33a-b397-4644-9f1b-89677ee76424","Type":"ContainerDied","Data":"31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e"}
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.446764 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31e3dae39e5e0ee9ddce09b6513804d7aa54f6236fc221114f2cc92cdc5e1d7e"
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.446860 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0c68-account-create-pkl88"
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.450131 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b08b-account-create-hqtp6" event={"ID":"35e79807-4ef8-4edc-ac3c-7d04a71cc82e","Type":"ContainerDied","Data":"39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987"}
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.450199 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39374c19f6ad919c33496d09635955c906b15b41824f3a752a4a974161a69987"
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.450344 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b08b-account-create-hqtp6"
Nov 28 15:41:46 crc kubenswrapper[4647]: I1128 15:41:46.889548 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.043958 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvnfn\" (UniqueName: \"kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn\") pod \"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6\" (UID: \"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6\") "
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.057554 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn" (OuterVolumeSpecName: "kube-api-access-lvnfn") pod "a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" (UID: "a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6"). InnerVolumeSpecName "kube-api-access-lvnfn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.147265 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvnfn\" (UniqueName: \"kubernetes.io/projected/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6-kube-api-access-lvnfn\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.461813 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-84aa-account-create-g9ndj"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.461821 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-84aa-account-create-g9ndj" event={"ID":"a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6","Type":"ContainerDied","Data":"402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754"}
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.461877 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="402872a18dd79617c7ff8879fc9aa012a062ff1a9fedbcec6dd7bf682bdb1754"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.859831 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.869077 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ab7ffd14-cb79-40b8-854d-1dd1deca75f2-etc-swift\") pod \"swift-storage-0\" (UID: \"ab7ffd14-cb79-40b8-854d-1dd1deca75f2\") " pod="openstack/swift-storage-0"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.955068 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-ss5b8"
Nov 28 15:41:47 crc kubenswrapper[4647]: I1128 15:41:47.996871 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062185 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062284 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062348 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062390 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgkq6\" (UniqueName: \"kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062511 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062536 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run\") pod \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\" (UID: \"93d0a4f3-4560-4765-9d9c-fbf7688671e6\") "
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.062906 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run" (OuterVolumeSpecName: "var-run") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.063876 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.064309 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts" (OuterVolumeSpecName: "scripts") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.064446 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.064469 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.069699 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6" (OuterVolumeSpecName: "kube-api-access-vgkq6") pod "93d0a4f3-4560-4765-9d9c-fbf7688671e6" (UID: "93d0a4f3-4560-4765-9d9c-fbf7688671e6"). InnerVolumeSpecName "kube-api-access-vgkq6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167721 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167750 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgkq6\" (UniqueName: \"kubernetes.io/projected/93d0a4f3-4560-4765-9d9c-fbf7688671e6-kube-api-access-vgkq6\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167761 4647 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167773 4647 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167781 4647 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/93d0a4f3-4560-4765-9d9c-fbf7688671e6-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.167791 4647 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93d0a4f3-4560-4765-9d9c-fbf7688671e6-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.275626 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-9gx56"]
Nov 28 15:41:48 crc kubenswrapper[4647]: E1128 15:41:48.276245 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90e3a33a-b397-4644-9f1b-89677ee76424" containerName="mariadb-account-create"
Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276258 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="90e3a33a-b397-4644-9f1b-89677ee76424" containerName="mariadb-account-create"
Nov 28 15:41:48
crc kubenswrapper[4647]: E1128 15:41:48.276272 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e79807-4ef8-4edc-ac3c-7d04a71cc82e" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276279 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e79807-4ef8-4edc-ac3c-7d04a71cc82e" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: E1128 15:41:48.276290 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d0a4f3-4560-4765-9d9c-fbf7688671e6" containerName="ovn-config" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276296 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d0a4f3-4560-4765-9d9c-fbf7688671e6" containerName="ovn-config" Nov 28 15:41:48 crc kubenswrapper[4647]: E1128 15:41:48.276314 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276320 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276501 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="90e3a33a-b397-4644-9f1b-89677ee76424" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276515 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276534 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="93d0a4f3-4560-4765-9d9c-fbf7688671e6" containerName="ovn-config" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.276541 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="35e79807-4ef8-4edc-ac3c-7d04a71cc82e" containerName="mariadb-account-create" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.283846 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.286813 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-dp9jz" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.286976 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.291405 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9gx56"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.373660 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsdsx\" (UniqueName: \"kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.373735 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.373824 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.373884 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.470031 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-ss5b8" event={"ID":"93d0a4f3-4560-4765-9d9c-fbf7688671e6","Type":"ContainerDied","Data":"422cd5de5be5b111321d7e494fc35a27b6c0682a98a169a4d5cb0307be109f44"} Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.470075 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="422cd5de5be5b111321d7e494fc35a27b6c0682a98a169a4d5cb0307be109f44" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.470122 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-4psvg-config-ss5b8" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.475084 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsdsx\" (UniqueName: \"kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.475150 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.475367 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.475497 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.481503 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.481616 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.482592 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.498911 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.505836 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsdsx\" (UniqueName: \"kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx\") pod \"glance-db-sync-9gx56\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: W1128 15:41:48.506319 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab7ffd14_cb79_40b8_854d_1dd1deca75f2.slice/crio-49d3c23aa1dc7b21141a7843b46e0941a4aa695b19bc1862f4a94241ef7dace2 WatchSource:0}: Error finding 
container 49d3c23aa1dc7b21141a7843b46e0941a4aa695b19bc1862f4a94241ef7dace2: Status 404 returned error can't find the container with id 49d3c23aa1dc7b21141a7843b46e0941a4aa695b19bc1862f4a94241ef7dace2 Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.552399 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-4psvg" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.572638 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-4psvg-config-ss5b8"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.601275 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9gx56" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.613775 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-4psvg-config-ss5b8"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.704021 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-4psvg-config-5x867"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.705445 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: W1128 15:41:48.710355 4647 reflector.go:561] object-"openstack"/"ovncontroller-extra-scripts": failed to list *v1.ConfigMap: configmaps "ovncontroller-extra-scripts" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 28 15:41:48 crc kubenswrapper[4647]: E1128 15:41:48.710425 4647 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"ovncontroller-extra-scripts\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"ovncontroller-extra-scripts\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.743195 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg-config-5x867"] Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.885564 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2bzk\" (UniqueName: \"kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.885628 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.885649 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc 
kubenswrapper[4647]: I1128 15:41:48.885680 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.885714 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.885741 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.988582 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.988926 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.988887 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989007 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989111 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989045 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " 
pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989242 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989328 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.989377 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2bzk\" (UniqueName: \"kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:48 crc kubenswrapper[4647]: I1128 15:41:48.991050 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.022130 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2bzk\" (UniqueName: \"kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.074791 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9gx56"] Nov 28 15:41:49 crc kubenswrapper[4647]: W1128 15:41:49.081350 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5b74ca6_04a9_46a3_8aa2_658580db07c0.slice/crio-e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba WatchSource:0}: Error finding container e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba: Status 404 returned error can't find the container with id e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.480137 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"49d3c23aa1dc7b21141a7843b46e0941a4aa695b19bc1862f4a94241ef7dace2"} Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.481846 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9gx56" event={"ID":"b5b74ca6-04a9-46a3-8aa2-658580db07c0","Type":"ContainerStarted","Data":"e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba"} Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.843866 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.850052 
4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts\") pod \"ovn-controller-4psvg-config-5x867\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:49 crc kubenswrapper[4647]: I1128 15:41:49.919533 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:50 crc kubenswrapper[4647]: I1128 15:41:50.409781 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93d0a4f3-4560-4765-9d9c-fbf7688671e6" path="/var/lib/kubelet/pods/93d0a4f3-4560-4765-9d9c-fbf7688671e6/volumes" Nov 28 15:41:50 crc kubenswrapper[4647]: I1128 15:41:50.519013 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-4psvg-config-5x867"] Nov 28 15:41:51 crc kubenswrapper[4647]: I1128 15:41:51.522763 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"601a3f02788c1b1b4db7f3cfff0b8300fbc29b95131b04b51583ceec2cba3c43"} Nov 28 15:41:51 crc kubenswrapper[4647]: I1128 15:41:51.523213 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"bb4949935175d7119b04ccca188ff479f63aa33866087142e741ddcb61b80068"} Nov 28 15:41:51 crc kubenswrapper[4647]: I1128 15:41:51.525226 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-5x867" event={"ID":"78fd3ad0-1139-4a99-b47c-65fac18d7576","Type":"ContainerStarted","Data":"1d765177d4ab6b1adb6f24dda7da2c260fbfa6a0ff35f2e53ad08e72328e8cf0"} Nov 28 15:41:51 crc kubenswrapper[4647]: I1128 15:41:51.525281 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-5x867" event={"ID":"78fd3ad0-1139-4a99-b47c-65fac18d7576","Type":"ContainerStarted","Data":"660a40b2e3b51509738f518207584a529a5cb8b9685410c08a36e564e58f2a2d"} Nov 28 15:41:51 crc kubenswrapper[4647]: I1128 15:41:51.575883 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-4psvg-config-5x867" podStartSLOduration=3.575853373 podStartE2EDuration="3.575853373s" podCreationTimestamp="2025-11-28 15:41:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:51.55291868 +0000 UTC m=+1041.400525121" watchObservedRunningTime="2025-11-28 15:41:51.575853373 +0000 UTC m=+1041.423459794" Nov 28 15:41:52 crc kubenswrapper[4647]: I1128 15:41:52.535679 4647 generic.go:334] "Generic (PLEG): container finished" podID="78fd3ad0-1139-4a99-b47c-65fac18d7576" containerID="1d765177d4ab6b1adb6f24dda7da2c260fbfa6a0ff35f2e53ad08e72328e8cf0" exitCode=0 Nov 28 15:41:52 crc kubenswrapper[4647]: I1128 15:41:52.535767 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-5x867" event={"ID":"78fd3ad0-1139-4a99-b47c-65fac18d7576","Type":"ContainerDied","Data":"1d765177d4ab6b1adb6f24dda7da2c260fbfa6a0ff35f2e53ad08e72328e8cf0"} Nov 28 15:41:52 crc kubenswrapper[4647]: I1128 15:41:52.539088 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"5e89d8ee20679c3938dc90d1daebb38e2a21c5098505b5ae4786d3b329ebc3c5"} Nov 28 15:41:53 crc kubenswrapper[4647]: I1128 15:41:53.556050 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"69eb007698a2e52488a5b9d416eb100ef8a4adeae05f03fa5ee07507e597fe53"} Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.206947 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.322984 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.323105 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.323226 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.324004 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.324044 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2bzk\" (UniqueName: \"kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.324071 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts\") pod \"78fd3ad0-1139-4a99-b47c-65fac18d7576\" (UID: \"78fd3ad0-1139-4a99-b47c-65fac18d7576\") " Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.323306 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run" (OuterVolumeSpecName: "var-run") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.323391 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.323908 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.324308 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.325169 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts" (OuterVolumeSpecName: "scripts") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.325197 4647 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.325246 4647 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.325432 4647 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.325457 4647 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78fd3ad0-1139-4a99-b47c-65fac18d7576-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.332751 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk" (OuterVolumeSpecName: "kube-api-access-t2bzk") pod "78fd3ad0-1139-4a99-b47c-65fac18d7576" (UID: "78fd3ad0-1139-4a99-b47c-65fac18d7576"). InnerVolumeSpecName "kube-api-access-t2bzk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.427384 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2bzk\" (UniqueName: \"kubernetes.io/projected/78fd3ad0-1139-4a99-b47c-65fac18d7576-kube-api-access-t2bzk\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.427429 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78fd3ad0-1139-4a99-b47c-65fac18d7576-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.574337 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"25901bec2d5e921928dcdf5e640429d417f49836b48543f2725bfced519da7fc"} Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.576714 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-4psvg-config-5x867" event={"ID":"78fd3ad0-1139-4a99-b47c-65fac18d7576","Type":"ContainerDied","Data":"660a40b2e3b51509738f518207584a529a5cb8b9685410c08a36e564e58f2a2d"} Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.576742 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="660a40b2e3b51509738f518207584a529a5cb8b9685410c08a36e564e58f2a2d" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.576808 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-4psvg-config-5x867" Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.660631 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-4psvg-config-5x867"] Nov 28 15:41:54 crc kubenswrapper[4647]: I1128 15:41:54.667226 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-4psvg-config-5x867"] Nov 28 15:41:55 crc kubenswrapper[4647]: I1128 15:41:55.631669 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"ef8d3e460328d792090a66e83c2fe9e1a7d4f31f7739a4d1ae341a332c7888c0"} Nov 28 15:41:55 crc kubenswrapper[4647]: I1128 15:41:55.631722 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"3d912d6d66fa175ffdc29ba4686c6e637be704215b6b2c433d487e7b2e859e45"} Nov 28 15:41:55 crc kubenswrapper[4647]: I1128 15:41:55.631734 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"ddac05b57ccf154595f574eb34e79a5235a1fcd531e5a1b0a4d0f351c3379353"} Nov 28 15:41:56 crc kubenswrapper[4647]: I1128 15:41:56.405235 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78fd3ad0-1139-4a99-b47c-65fac18d7576" path="/var/lib/kubelet/pods/78fd3ad0-1139-4a99-b47c-65fac18d7576/volumes" Nov 28 15:41:56 crc kubenswrapper[4647]: I1128 15:41:56.665243 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"112ebed118192ff9599722a4e4e34dbffff7aab6bb0914dfb0b542f288fbbfa5"} Nov 28 15:41:57 crc kubenswrapper[4647]: I1128 15:41:57.688407 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"3e67e73311e41bac1257bef6bc4c7878212225f323f8d77bcb5a3bf94e756b70"} Nov 28 15:41:59 crc kubenswrapper[4647]: I1128 15:41:59.538106 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 15:41:59 crc kubenswrapper[4647]: I1128 15:41:59.931111 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.134761 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-t7m57"] Nov 28 15:42:00 crc kubenswrapper[4647]: E1128 15:42:00.141664 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78fd3ad0-1139-4a99-b47c-65fac18d7576" containerName="ovn-config" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.141698 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="78fd3ad0-1139-4a99-b47c-65fac18d7576" containerName="ovn-config" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.141944 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="78fd3ad0-1139-4a99-b47c-65fac18d7576" containerName="ovn-config" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.142658 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.188919 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-t7m57"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.252485 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g2km\" (UniqueName: \"kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km\") pod \"barbican-db-create-t7m57\" (UID: \"c6182ef5-d994-41fe-a7fc-d07b63bb0156\") " pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.354428 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g2km\" (UniqueName: \"kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km\") pod \"barbican-db-create-t7m57\" (UID: \"c6182ef5-d994-41fe-a7fc-d07b63bb0156\") " pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.375183 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g2km\" (UniqueName: \"kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km\") pod \"barbican-db-create-t7m57\" (UID: \"c6182ef5-d994-41fe-a7fc-d07b63bb0156\") " pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.458852 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.481369 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-ftfj5"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.482444 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.511474 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ftfj5"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.549240 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-4rng7"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.551846 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.555470 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.555672 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.556835 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsffc\" (UniqueName: \"kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc\") pod \"cinder-db-create-ftfj5\" (UID: \"75bbefac-ee0e-4ad3-92c2-38c7dc33c733\") " pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.557560 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-7j874" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.557694 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.596339 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4rng7"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.654818 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-6rfmd"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.656919 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.657951 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.657998 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.658060 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsffc\" (UniqueName: \"kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc\") pod \"cinder-db-create-ftfj5\" (UID: \"75bbefac-ee0e-4ad3-92c2-38c7dc33c733\") " pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.658149 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8rzz\" (UniqueName: \"kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.666463 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-6rfmd"] Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.688744 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsffc\" (UniqueName: \"kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc\") pod \"cinder-db-create-ftfj5\" (UID: \"75bbefac-ee0e-4ad3-92c2-38c7dc33c733\") " pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.759541 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.759593 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.759668 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5tqj\" (UniqueName: \"kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj\") pod \"neutron-db-create-6rfmd\" (UID: \"e65030f5-e153-41fc-9765-c4d02d353ef1\") " pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.759755 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8rzz\" (UniqueName: 
\"kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.763726 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.776225 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.787834 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8rzz\" (UniqueName: \"kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz\") pod \"keystone-db-sync-4rng7\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.800035 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.861525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5tqj\" (UniqueName: \"kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj\") pod \"neutron-db-create-6rfmd\" (UID: \"e65030f5-e153-41fc-9765-c4d02d353ef1\") " pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.866071 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.896295 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5tqj\" (UniqueName: \"kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj\") pod \"neutron-db-create-6rfmd\" (UID: \"e65030f5-e153-41fc-9765-c4d02d353ef1\") " pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:00 crc kubenswrapper[4647]: I1128 15:42:00.977810 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:08 crc kubenswrapper[4647]: E1128 15:42:08.301684 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 28 15:42:08 crc kubenswrapper[4647]: E1128 15:42:08.302483 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rsdsx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-9gx56_openstack(b5b74ca6-04a9-46a3-8aa2-658580db07c0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:42:08 crc kubenswrapper[4647]: E1128 15:42:08.304066 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-9gx56" podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" Nov 28 15:42:08 crc kubenswrapper[4647]: I1128 15:42:08.799815 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"a9b83c5d1881b768b988758c2057f560df7ad47ff33051550782306e654cc4f0"} Nov 28 15:42:08 crc kubenswrapper[4647]: E1128 15:42:08.803967 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-9gx56" podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" Nov 28 15:42:08 crc kubenswrapper[4647]: I1128 15:42:08.895892 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4rng7"] Nov 28 15:42:08 crc kubenswrapper[4647]: I1128 15:42:08.969065 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-6rfmd"] Nov 28 15:42:08 crc kubenswrapper[4647]: I1128 15:42:08.980729 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ftfj5"] Nov 28 15:42:08 crc kubenswrapper[4647]: I1128 15:42:08.994218 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-t7m57"] Nov 28 15:42:09 crc kubenswrapper[4647]: W1128 15:42:09.001438 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75bbefac_ee0e_4ad3_92c2_38c7dc33c733.slice/crio-e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422 WatchSource:0}: Error finding container e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422: Status 404 returned error can't find the container with id e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422 Nov 28 15:42:09 crc kubenswrapper[4647]: W1128 15:42:09.006907 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6182ef5_d994_41fe_a7fc_d07b63bb0156.slice/crio-d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249 WatchSource:0}: Error finding container d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249: Status 404 returned error can't find the container with id d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249 Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.811181 4647 generic.go:334] "Generic (PLEG): container finished" podID="75bbefac-ee0e-4ad3-92c2-38c7dc33c733" containerID="f8208c12c3f309859fb2cfcd755392fbafac6502071e44c56fb7a18e9161d5c8" exitCode=0 Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.811257 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ftfj5" event={"ID":"75bbefac-ee0e-4ad3-92c2-38c7dc33c733","Type":"ContainerDied","Data":"f8208c12c3f309859fb2cfcd755392fbafac6502071e44c56fb7a18e9161d5c8"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.811676 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ftfj5" event={"ID":"75bbefac-ee0e-4ad3-92c2-38c7dc33c733","Type":"ContainerStarted","Data":"e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.813882 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4rng7" event={"ID":"d4796edf-cb5a-41a8-b06b-25233998f32b","Type":"ContainerStarted","Data":"079f0ff9a6bb7b6123e2e04c78b9fb15702bd5bb4945155034618a43e61a25f8"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.816558 4647 generic.go:334] "Generic (PLEG): container finished" podID="e65030f5-e153-41fc-9765-c4d02d353ef1" containerID="b61db8210975510a7c3df628ee1680245e15392c15c0651ad2b6b4632add8eb2" exitCode=0 Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.816608 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6rfmd" 
event={"ID":"e65030f5-e153-41fc-9765-c4d02d353ef1","Type":"ContainerDied","Data":"b61db8210975510a7c3df628ee1680245e15392c15c0651ad2b6b4632add8eb2"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.816636 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6rfmd" event={"ID":"e65030f5-e153-41fc-9765-c4d02d353ef1","Type":"ContainerStarted","Data":"db27e9e59b103c9fe1c62314c9dd20a8eb5ab97b3e2086d4883f5eb1aee57522"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.818388 4647 generic.go:334] "Generic (PLEG): container finished" podID="c6182ef5-d994-41fe-a7fc-d07b63bb0156" containerID="7cba9509a18cdcb6cb26891a229bb9a6dfbcb94bc2e43777d7b98186bb0c9c05" exitCode=0 Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.818493 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-t7m57" event={"ID":"c6182ef5-d994-41fe-a7fc-d07b63bb0156","Type":"ContainerDied","Data":"7cba9509a18cdcb6cb26891a229bb9a6dfbcb94bc2e43777d7b98186bb0c9c05"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.818513 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-t7m57" event={"ID":"c6182ef5-d994-41fe-a7fc-d07b63bb0156","Type":"ContainerStarted","Data":"d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.831263 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"17c14f2cbbf0c105338ba0258a4ea5f42d25e0b7dbb198992fd7b6efc691450c"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.831343 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"463e056a3a66d344e860a04dc237b74a2223e58aef18f13e0d82b7424d66d662"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.831372 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"710707f73212be99c500bce7ab2942db9806d4b54c2db10f7be1573f59920c1c"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.831396 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"ab7ffd14-cb79-40b8-854d-1dd1deca75f2","Type":"ContainerStarted","Data":"67cf471ccd2a309a53f5092f1049b8e0b65843f4cedda5974a513428f0a9d001"} Nov 28 15:42:09 crc kubenswrapper[4647]: I1128 15:42:09.908529 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=48.018631353 podStartE2EDuration="55.908508486s" podCreationTimestamp="2025-11-28 15:41:14 +0000 UTC" firstStartedPulling="2025-11-28 15:41:48.50884037 +0000 UTC m=+1038.356446791" lastFinishedPulling="2025-11-28 15:41:56.398717513 +0000 UTC m=+1046.246323924" observedRunningTime="2025-11-28 15:42:09.906647667 +0000 UTC m=+1059.754254088" watchObservedRunningTime="2025-11-28 15:42:09.908508486 +0000 UTC m=+1059.756114907" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.272935 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.276650 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.279223 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.296061 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368433 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxxzl\" (UniqueName: \"kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368495 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368525 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368636 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368672 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.368703 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.470590 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.471992 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " 
pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.472511 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.472598 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.473339 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxxzl\" (UniqueName: \"kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.473404 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.473874 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.474366 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.474642 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.475315 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.475994 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc 
kubenswrapper[4647]: I1128 15:42:10.492812 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxxzl\" (UniqueName: \"kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl\") pod \"dnsmasq-dns-6d5b6d6b67-67qmp\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:10 crc kubenswrapper[4647]: I1128 15:42:10.593251 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:11 crc kubenswrapper[4647]: E1128 15:42:11.021636 4647 kubelet_node_status.go:756] "Failed to set some node status fields" err="failed to validate nodeIP: route ip+net: no such network interface" node="crc" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.125447 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.265217 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.298008 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsffc\" (UniqueName: \"kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc\") pod \"75bbefac-ee0e-4ad3-92c2-38c7dc33c733\" (UID: \"75bbefac-ee0e-4ad3-92c2-38c7dc33c733\") " Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.301481 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.312354 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc" (OuterVolumeSpecName: "kube-api-access-jsffc") pod "75bbefac-ee0e-4ad3-92c2-38c7dc33c733" (UID: "75bbefac-ee0e-4ad3-92c2-38c7dc33c733"). InnerVolumeSpecName "kube-api-access-jsffc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.336277 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.399014 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8g2km\" (UniqueName: \"kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km\") pod \"c6182ef5-d994-41fe-a7fc-d07b63bb0156\" (UID: \"c6182ef5-d994-41fe-a7fc-d07b63bb0156\") " Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.399603 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5tqj\" (UniqueName: \"kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj\") pod \"e65030f5-e153-41fc-9765-c4d02d353ef1\" (UID: \"e65030f5-e153-41fc-9765-c4d02d353ef1\") " Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.400065 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsffc\" (UniqueName: \"kubernetes.io/projected/75bbefac-ee0e-4ad3-92c2-38c7dc33c733-kube-api-access-jsffc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.404735 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj" (OuterVolumeSpecName: "kube-api-access-z5tqj") pod "e65030f5-e153-41fc-9765-c4d02d353ef1" (UID: "e65030f5-e153-41fc-9765-c4d02d353ef1"). InnerVolumeSpecName "kube-api-access-z5tqj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.404864 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km" (OuterVolumeSpecName: "kube-api-access-8g2km") pod "c6182ef5-d994-41fe-a7fc-d07b63bb0156" (UID: "c6182ef5-d994-41fe-a7fc-d07b63bb0156"). InnerVolumeSpecName "kube-api-access-8g2km". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.501797 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5tqj\" (UniqueName: \"kubernetes.io/projected/e65030f5-e153-41fc-9765-c4d02d353ef1-kube-api-access-z5tqj\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.501835 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8g2km\" (UniqueName: \"kubernetes.io/projected/c6182ef5-d994-41fe-a7fc-d07b63bb0156-kube-api-access-8g2km\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.856386 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6rfmd" event={"ID":"e65030f5-e153-41fc-9765-c4d02d353ef1","Type":"ContainerDied","Data":"db27e9e59b103c9fe1c62314c9dd20a8eb5ab97b3e2086d4883f5eb1aee57522"} Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.856465 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db27e9e59b103c9fe1c62314c9dd20a8eb5ab97b3e2086d4883f5eb1aee57522" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.856463 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6rfmd" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.859705 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-t7m57" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.859695 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-t7m57" event={"ID":"c6182ef5-d994-41fe-a7fc-d07b63bb0156","Type":"ContainerDied","Data":"d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249"} Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.859819 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d92e1d1ee79253c2343cff1d8010ea230ecad1ecedb21bd9daa9e2c4ce3e4249" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.861654 4647 generic.go:334] "Generic (PLEG): container finished" podID="0a477d1b-0255-4aef-8415-5297f93df84b" containerID="354bf77e9451bcb5641bdd9c7a7ea1368c5584e2d9219698e23563b126098da9" exitCode=0 Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.861732 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" event={"ID":"0a477d1b-0255-4aef-8415-5297f93df84b","Type":"ContainerDied","Data":"354bf77e9451bcb5641bdd9c7a7ea1368c5584e2d9219698e23563b126098da9"} Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.861832 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" event={"ID":"0a477d1b-0255-4aef-8415-5297f93df84b","Type":"ContainerStarted","Data":"7cf9506a2b93a7b19ab834ba59b59dd45b9aee005fbe651b6503f6694eddbdc0"} Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.865791 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ftfj5" event={"ID":"75bbefac-ee0e-4ad3-92c2-38c7dc33c733","Type":"ContainerDied","Data":"e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422"} Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.865822 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7e0a5e557332fa50de3d5c7650c7fb112a3238de659792ee86f233d721b7422" Nov 28 15:42:11 crc kubenswrapper[4647]: I1128 15:42:11.865882 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-ftfj5" Nov 28 15:42:12 crc kubenswrapper[4647]: I1128 15:42:12.878951 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" event={"ID":"0a477d1b-0255-4aef-8415-5297f93df84b","Type":"ContainerStarted","Data":"ad0e0b2e3a3c83519f8a6aa7dcae530de651e0115c582b271c06fdb7f4eb07e2"} Nov 28 15:42:12 crc kubenswrapper[4647]: I1128 15:42:12.881721 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:12 crc kubenswrapper[4647]: I1128 15:42:12.905750 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podStartSLOduration=2.905726615 podStartE2EDuration="2.905726615s" podCreationTimestamp="2025-11-28 15:42:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:12.901473783 +0000 UTC m=+1062.749080214" watchObservedRunningTime="2025-11-28 15:42:12.905726615 +0000 UTC m=+1062.753333056" Nov 28 15:42:15 crc kubenswrapper[4647]: I1128 15:42:15.912757 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4rng7" event={"ID":"d4796edf-cb5a-41a8-b06b-25233998f32b","Type":"ContainerStarted","Data":"69ff0e64181b0f15e2178f8d9315a32ec4cfaa7d2bd0e4ce84313b9548ac2bd7"} Nov 28 15:42:17 crc kubenswrapper[4647]: I1128 15:42:17.023007 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:42:17 crc kubenswrapper[4647]: I1128 15:42:17.023101 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.289939 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-4rng7" podStartSLOduration=13.885836695 podStartE2EDuration="20.289916403s" podCreationTimestamp="2025-11-28 15:42:00 +0000 UTC" firstStartedPulling="2025-11-28 15:42:08.903303794 +0000 UTC m=+1058.750910215" lastFinishedPulling="2025-11-28 15:42:15.307383502 +0000 UTC m=+1065.154989923" observedRunningTime="2025-11-28 15:42:15.941662313 +0000 UTC m=+1065.789268754" watchObservedRunningTime="2025-11-28 15:42:20.289916403 +0000 UTC m=+1070.137522824" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291066 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1460-account-create-9fj85"] Nov 28 15:42:20 crc kubenswrapper[4647]: E1128 15:42:20.291435 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6182ef5-d994-41fe-a7fc-d07b63bb0156" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291448 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6182ef5-d994-41fe-a7fc-d07b63bb0156" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: E1128 15:42:20.291476 4647 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e65030f5-e153-41fc-9765-c4d02d353ef1" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291484 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65030f5-e153-41fc-9765-c4d02d353ef1" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: E1128 15:42:20.291514 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75bbefac-ee0e-4ad3-92c2-38c7dc33c733" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291520 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="75bbefac-ee0e-4ad3-92c2-38c7dc33c733" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291678 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65030f5-e153-41fc-9765-c4d02d353ef1" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291698 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="75bbefac-ee0e-4ad3-92c2-38c7dc33c733" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.291720 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6182ef5-d994-41fe-a7fc-d07b63bb0156" containerName="mariadb-database-create" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.292260 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.295252 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.306612 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1460-account-create-9fj85"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.406996 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6m45\" (UniqueName: \"kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45\") pod \"barbican-1460-account-create-9fj85\" (UID: \"e013c7a9-cf78-48c6-bd86-50065e0d2d19\") " pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.499436 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-45f8-account-create-5nhrv"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.500832 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.505163 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.509186 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-45f8-account-create-5nhrv"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.510603 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6m45\" (UniqueName: \"kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45\") pod \"barbican-1460-account-create-9fj85\" (UID: \"e013c7a9-cf78-48c6-bd86-50065e0d2d19\") " pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.547706 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6m45\" (UniqueName: \"kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45\") pod \"barbican-1460-account-create-9fj85\" (UID: \"e013c7a9-cf78-48c6-bd86-50065e0d2d19\") " pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.594628 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.613904 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz4fm\" (UniqueName: \"kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm\") pod \"cinder-45f8-account-create-5nhrv\" (UID: \"a202ad93-2e07-4d93-a9db-a5a1b095f22b\") " pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.627716 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.692696 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.693025 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="dnsmasq-dns" containerID="cri-o://badb58cba2b5c07c01a67494cf365df196ed0be67916469e9b303908a2f267d5" gracePeriod=10 Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.716258 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz4fm\" (UniqueName: \"kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm\") pod \"cinder-45f8-account-create-5nhrv\" (UID: \"a202ad93-2e07-4d93-a9db-a5a1b095f22b\") " pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.734686 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4869-account-create-5n2xv"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.737431 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.746116 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.750247 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4869-account-create-5n2xv"] Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.755188 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz4fm\" (UniqueName: \"kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm\") pod \"cinder-45f8-account-create-5nhrv\" (UID: \"a202ad93-2e07-4d93-a9db-a5a1b095f22b\") " pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.818655 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jllk\" (UniqueName: \"kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk\") pod \"neutron-4869-account-create-5n2xv\" (UID: \"a46198d8-04b6-4efe-8bf9-49a8babad6b4\") " pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.820737 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.830789 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.110:5353: connect: connection refused" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.920087 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jllk\" (UniqueName: \"kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk\") pod \"neutron-4869-account-create-5n2xv\" (UID: \"a46198d8-04b6-4efe-8bf9-49a8babad6b4\") " pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:20 crc kubenswrapper[4647]: I1128 15:42:20.938018 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jllk\" (UniqueName: \"kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk\") pod \"neutron-4869-account-create-5n2xv\" (UID: \"a46198d8-04b6-4efe-8bf9-49a8babad6b4\") " pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:21 crc kubenswrapper[4647]: I1128 15:42:21.097783 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:21.468007 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-45f8-account-create-5nhrv"] Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:21.968347 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-45f8-account-create-5nhrv" event={"ID":"a202ad93-2e07-4d93-a9db-a5a1b095f22b","Type":"ContainerStarted","Data":"5a978313ddaaefcff09479c5e53dc2fd3f7bbde89d2ea48ad0aa08df22d9fe05"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:21.970886 4647 generic.go:334] "Generic (PLEG): container finished" podID="1de90ce8-1534-4003-b298-49df66ba86f1" containerID="badb58cba2b5c07c01a67494cf365df196ed0be67916469e9b303908a2f267d5" exitCode=0 Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:21.970910 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" event={"ID":"1de90ce8-1534-4003-b298-49df66ba86f1","Type":"ContainerDied","Data":"badb58cba2b5c07c01a67494cf365df196ed0be67916469e9b303908a2f267d5"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.712672 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1460-account-create-9fj85"] Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.714277 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4869-account-create-5n2xv"] Nov 28 15:42:22 crc kubenswrapper[4647]: W1128 15:42:22.731865 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda46198d8_04b6_4efe_8bf9_49a8babad6b4.slice/crio-a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb WatchSource:0}: Error finding container a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb: Status 404 returned error can't find the container with id a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.978992 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" event={"ID":"1de90ce8-1534-4003-b298-49df66ba86f1","Type":"ContainerDied","Data":"27d466c6a6f733e87ae284b8e1b137caf73d0e0bc405efabf2b7918b251a8d85"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.979030 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27d466c6a6f733e87ae284b8e1b137caf73d0e0bc405efabf2b7918b251a8d85" Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.983664 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9gx56" event={"ID":"b5b74ca6-04a9-46a3-8aa2-658580db07c0","Type":"ContainerStarted","Data":"0d1ba4f54393cc1a7d2d80a35761c9939500729559ff14ca733d9569bb273beb"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.986710 4647 generic.go:334] "Generic (PLEG): container finished" podID="a202ad93-2e07-4d93-a9db-a5a1b095f22b" containerID="e607e8e0690608ba241165256f71c6844d5044b363755b54b79014f43b3031fa" exitCode=0 Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.986795 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-45f8-account-create-5nhrv" event={"ID":"a202ad93-2e07-4d93-a9db-a5a1b095f22b","Type":"ContainerDied","Data":"e607e8e0690608ba241165256f71c6844d5044b363755b54b79014f43b3031fa"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.986881 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.987762 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1460-account-create-9fj85" event={"ID":"e013c7a9-cf78-48c6-bd86-50065e0d2d19","Type":"ContainerStarted","Data":"88f2cff735167519ae098bf60f1738943f46da67d166d758d8cee25c09edbaf2"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.989230 4647 generic.go:334] "Generic (PLEG): container finished" podID="d4796edf-cb5a-41a8-b06b-25233998f32b" containerID="69ff0e64181b0f15e2178f8d9315a32ec4cfaa7d2bd0e4ce84313b9548ac2bd7" exitCode=0 Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.989304 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4rng7" event={"ID":"d4796edf-cb5a-41a8-b06b-25233998f32b","Type":"ContainerDied","Data":"69ff0e64181b0f15e2178f8d9315a32ec4cfaa7d2bd0e4ce84313b9548ac2bd7"} Nov 28 15:42:22 crc kubenswrapper[4647]: I1128 15:42:22.991043 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4869-account-create-5n2xv" event={"ID":"a46198d8-04b6-4efe-8bf9-49a8babad6b4","Type":"ContainerStarted","Data":"a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb"} Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.011263 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-9gx56" podStartSLOduration=2.458964649 podStartE2EDuration="35.011234703s" podCreationTimestamp="2025-11-28 15:41:48 +0000 UTC" firstStartedPulling="2025-11-28 15:41:49.083760121 +0000 UTC m=+1038.931366532" lastFinishedPulling="2025-11-28 15:42:21.636030165 +0000 UTC m=+1071.483636586" observedRunningTime="2025-11-28 15:42:22.998271912 +0000 UTC m=+1072.845878333" watchObservedRunningTime="2025-11-28 15:42:23.011234703 +0000 UTC m=+1072.858841124" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.185249 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb\") pod \"1de90ce8-1534-4003-b298-49df66ba86f1\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.185704 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc\") pod \"1de90ce8-1534-4003-b298-49df66ba86f1\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.185797 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbvxl\" (UniqueName: \"kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl\") pod \"1de90ce8-1534-4003-b298-49df66ba86f1\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.185876 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb\") pod \"1de90ce8-1534-4003-b298-49df66ba86f1\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.185909 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config\") pod 
\"1de90ce8-1534-4003-b298-49df66ba86f1\" (UID: \"1de90ce8-1534-4003-b298-49df66ba86f1\") " Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.206352 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl" (OuterVolumeSpecName: "kube-api-access-rbvxl") pod "1de90ce8-1534-4003-b298-49df66ba86f1" (UID: "1de90ce8-1534-4003-b298-49df66ba86f1"). InnerVolumeSpecName "kube-api-access-rbvxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.246468 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1de90ce8-1534-4003-b298-49df66ba86f1" (UID: "1de90ce8-1534-4003-b298-49df66ba86f1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.247299 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config" (OuterVolumeSpecName: "config") pod "1de90ce8-1534-4003-b298-49df66ba86f1" (UID: "1de90ce8-1534-4003-b298-49df66ba86f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.248214 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1de90ce8-1534-4003-b298-49df66ba86f1" (UID: "1de90ce8-1534-4003-b298-49df66ba86f1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.249576 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1de90ce8-1534-4003-b298-49df66ba86f1" (UID: "1de90ce8-1534-4003-b298-49df66ba86f1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.288148 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.288183 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.288194 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbvxl\" (UniqueName: \"kubernetes.io/projected/1de90ce8-1534-4003-b298-49df66ba86f1-kube-api-access-rbvxl\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.288204 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:23 crc kubenswrapper[4647]: I1128 15:42:23.288214 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de90ce8-1534-4003-b298-49df66ba86f1-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.006814 4647 generic.go:334] "Generic (PLEG): container finished" podID="e013c7a9-cf78-48c6-bd86-50065e0d2d19" containerID="125a556b37f54e4b1e278f6754ea18ce3283617383b660855ba48c9868d2ad6b" exitCode=0 Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.006958 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1460-account-create-9fj85" event={"ID":"e013c7a9-cf78-48c6-bd86-50065e0d2d19","Type":"ContainerDied","Data":"125a556b37f54e4b1e278f6754ea18ce3283617383b660855ba48c9868d2ad6b"} Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.009903 4647 generic.go:334] "Generic (PLEG): container finished" podID="a46198d8-04b6-4efe-8bf9-49a8babad6b4" containerID="37ebbc620e01d8d7bd6db309f510c0986df4c9cd46c8e2c170f9bcb9323cb00e" exitCode=0 Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.010188 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4869-account-create-5n2xv" event={"ID":"a46198d8-04b6-4efe-8bf9-49a8babad6b4","Type":"ContainerDied","Data":"37ebbc620e01d8d7bd6db309f510c0986df4c9cd46c8e2c170f9bcb9323cb00e"} Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.010367 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-krsc9" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.095986 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.102821 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-krsc9"] Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.402730 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" path="/var/lib/kubelet/pods/1de90ce8-1534-4003-b298-49df66ba86f1/volumes" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.433772 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.440905 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.521085 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz4fm\" (UniqueName: \"kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm\") pod \"a202ad93-2e07-4d93-a9db-a5a1b095f22b\" (UID: \"a202ad93-2e07-4d93-a9db-a5a1b095f22b\") " Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.521180 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle\") pod \"d4796edf-cb5a-41a8-b06b-25233998f32b\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.522137 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data\") pod \"d4796edf-cb5a-41a8-b06b-25233998f32b\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.522216 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8rzz\" (UniqueName: \"kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz\") pod \"d4796edf-cb5a-41a8-b06b-25233998f32b\" (UID: \"d4796edf-cb5a-41a8-b06b-25233998f32b\") " Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.526082 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm" (OuterVolumeSpecName: "kube-api-access-tz4fm") pod "a202ad93-2e07-4d93-a9db-a5a1b095f22b" (UID: "a202ad93-2e07-4d93-a9db-a5a1b095f22b"). InnerVolumeSpecName "kube-api-access-tz4fm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.526640 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz" (OuterVolumeSpecName: "kube-api-access-q8rzz") pod "d4796edf-cb5a-41a8-b06b-25233998f32b" (UID: "d4796edf-cb5a-41a8-b06b-25233998f32b"). InnerVolumeSpecName "kube-api-access-q8rzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.553181 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4796edf-cb5a-41a8-b06b-25233998f32b" (UID: "d4796edf-cb5a-41a8-b06b-25233998f32b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.566972 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data" (OuterVolumeSpecName: "config-data") pod "d4796edf-cb5a-41a8-b06b-25233998f32b" (UID: "d4796edf-cb5a-41a8-b06b-25233998f32b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.624669 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8rzz\" (UniqueName: \"kubernetes.io/projected/d4796edf-cb5a-41a8-b06b-25233998f32b-kube-api-access-q8rzz\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.624715 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz4fm\" (UniqueName: \"kubernetes.io/projected/a202ad93-2e07-4d93-a9db-a5a1b095f22b-kube-api-access-tz4fm\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.624729 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4647]: I1128 15:42:24.624742 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4796edf-cb5a-41a8-b06b-25233998f32b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.027607 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-45f8-account-create-5nhrv" event={"ID":"a202ad93-2e07-4d93-a9db-a5a1b095f22b","Type":"ContainerDied","Data":"5a978313ddaaefcff09479c5e53dc2fd3f7bbde89d2ea48ad0aa08df22d9fe05"} Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.028046 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a978313ddaaefcff09479c5e53dc2fd3f7bbde89d2ea48ad0aa08df22d9fe05" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.028129 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-45f8-account-create-5nhrv" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.033117 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4rng7" event={"ID":"d4796edf-cb5a-41a8-b06b-25233998f32b","Type":"ContainerDied","Data":"079f0ff9a6bb7b6123e2e04c78b9fb15702bd5bb4945155034618a43e61a25f8"} Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.033348 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="079f0ff9a6bb7b6123e2e04c78b9fb15702bd5bb4945155034618a43e61a25f8" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.037280 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-4rng7" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306122 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:25 crc kubenswrapper[4647]: E1128 15:42:25.306530 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a202ad93-2e07-4d93-a9db-a5a1b095f22b" containerName="mariadb-account-create" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306548 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a202ad93-2e07-4d93-a9db-a5a1b095f22b" containerName="mariadb-account-create" Nov 28 15:42:25 crc kubenswrapper[4647]: E1128 15:42:25.306567 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="init" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306574 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="init" Nov 28 15:42:25 crc kubenswrapper[4647]: E1128 15:42:25.306593 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="dnsmasq-dns" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306599 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="dnsmasq-dns" Nov 28 15:42:25 crc kubenswrapper[4647]: E1128 15:42:25.306615 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4796edf-cb5a-41a8-b06b-25233998f32b" containerName="keystone-db-sync" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306622 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4796edf-cb5a-41a8-b06b-25233998f32b" containerName="keystone-db-sync" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306787 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4796edf-cb5a-41a8-b06b-25233998f32b" containerName="keystone-db-sync" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306803 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="1de90ce8-1534-4003-b298-49df66ba86f1" containerName="dnsmasq-dns" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.306821 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a202ad93-2e07-4d93-a9db-a5a1b095f22b" containerName="mariadb-account-create" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.307730 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.324944 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339671 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339739 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339798 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339826 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339894 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.339914 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92s4z\" (UniqueName: \"kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.391214 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-njlfg"] Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.392356 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.403094 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.403149 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.403314 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.403340 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-7j874" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.409495 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-njlfg"] Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.445886 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.445982 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446114 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446135 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68z7k\" (UniqueName: \"kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446155 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446220 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446241 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92s4z\" (UniqueName: \"kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" 
(UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446265 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446292 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446326 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446356 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.446432 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.450756 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.459451 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.460083 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.460194 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " 
pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.466179 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.491629 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92s4z\" (UniqueName: \"kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z\") pod \"dnsmasq-dns-6f8c45789f-b77l6\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.549135 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.549189 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68z7k\" (UniqueName: \"kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.549244 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.549286 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.550691 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.550729 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.560455 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.563932 4647 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.564301 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.573055 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.578909 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68z7k\" (UniqueName: \"kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.581532 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle\") pod \"keystone-bootstrap-njlfg\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.641681 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.643514 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.646296 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-s7zsm" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.646760 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.646929 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.647144 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.652517 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.652601 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc62g\" (UniqueName: \"kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.652638 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.652662 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.652700 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.657925 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.676982 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.699239 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.707282 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.742660 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.774593 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jllk\" (UniqueName: \"kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk\") pod \"a46198d8-04b6-4efe-8bf9-49a8babad6b4\" (UID: \"a46198d8-04b6-4efe-8bf9-49a8babad6b4\") " Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.774718 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6m45\" (UniqueName: \"kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45\") pod \"e013c7a9-cf78-48c6-bd86-50065e0d2d19\" (UID: \"e013c7a9-cf78-48c6-bd86-50065e0d2d19\") " Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.775285 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.775356 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.775426 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc62g\" (UniqueName: \"kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.775464 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.775492 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.779990 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.783734 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.784059 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.790587 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk" (OuterVolumeSpecName: "kube-api-access-6jllk") pod "a46198d8-04b6-4efe-8bf9-49a8babad6b4" (UID: "a46198d8-04b6-4efe-8bf9-49a8babad6b4"). InnerVolumeSpecName "kube-api-access-6jllk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.795964 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.796275 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45" (OuterVolumeSpecName: "kube-api-access-z6m45") pod "e013c7a9-cf78-48c6-bd86-50065e0d2d19" (UID: "e013c7a9-cf78-48c6-bd86-50065e0d2d19"). InnerVolumeSpecName "kube-api-access-z6m45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.880611 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jllk\" (UniqueName: \"kubernetes.io/projected/a46198d8-04b6-4efe-8bf9-49a8babad6b4-kube-api-access-6jllk\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.880644 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6m45\" (UniqueName: \"kubernetes.io/projected/e013c7a9-cf78-48c6-bd86-50065e0d2d19-kube-api-access-z6m45\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.892514 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc62g\" (UniqueName: \"kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g\") pod \"horizon-5775b4c989-4jknn\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:25 crc kubenswrapper[4647]: I1128 15:42:25.898161 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.033474 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:26 crc kubenswrapper[4647]: E1128 15:42:26.033936 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e013c7a9-cf78-48c6-bd86-50065e0d2d19" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.033950 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e013c7a9-cf78-48c6-bd86-50065e0d2d19" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: E1128 15:42:26.033965 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a46198d8-04b6-4efe-8bf9-49a8babad6b4" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.033971 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a46198d8-04b6-4efe-8bf9-49a8babad6b4" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.034488 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e013c7a9-cf78-48c6-bd86-50065e0d2d19" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.034508 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a46198d8-04b6-4efe-8bf9-49a8babad6b4" containerName="mariadb-account-create" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.038279 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.047096 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.059184 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.113538 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.119272 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.127339 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1460-account-create-9fj85" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.127427 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1460-account-create-9fj85" event={"ID":"e013c7a9-cf78-48c6-bd86-50065e0d2d19","Type":"ContainerDied","Data":"88f2cff735167519ae098bf60f1738943f46da67d166d758d8cee25c09edbaf2"} Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.127496 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88f2cff735167519ae098bf60f1738943f46da67d166d758d8cee25c09edbaf2" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.144324 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4869-account-create-5n2xv" event={"ID":"a46198d8-04b6-4efe-8bf9-49a8babad6b4","Type":"ContainerDied","Data":"a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb"} Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.144367 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a34d0b1e0b67614d27c846dc7cf152678354c34329f9e8f25cb1553fccc7d1eb" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.144450 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4869-account-create-5n2xv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.155459 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.184599 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.203981 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-4n48c"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.212875 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.215934 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.216219 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.216445 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mk2rq" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232126 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwg9m\" (UniqueName: \"kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232266 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psxg5\" (UniqueName: \"kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232354 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232503 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232606 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232640 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232669 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232829 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.232946 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.233055 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.233158 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.233194 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.233382 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4n48c"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.280852 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-p265g"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.282316 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.291538 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.291776 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.291947 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-5ddwp" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.325607 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335469 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwg9m\" (UniqueName: \"kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335558 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335598 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psxg5\" (UniqueName: \"kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335616 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335671 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335693 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c8bp\" (UniqueName: \"kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335733 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335760 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335783 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335802 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335818 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335841 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335881 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335912 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335947 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.335979 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.336004 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 
crc kubenswrapper[4647]: I1128 15:42:26.338870 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.347869 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.348086 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.348823 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.349260 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.350395 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-p265g"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.352298 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.352388 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.353068 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.355345 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.357856 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwg9m\" (UniqueName: 
\"kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.358219 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.363870 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psxg5\" (UniqueName: \"kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5\") pod \"horizon-685658f7df-nxgvv\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.370609 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.372518 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.415918 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.417691 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442564 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442673 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442700 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442720 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442750 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: 
I1128 15:42:26.442774 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c8bp\" (UniqueName: \"kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442804 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442840 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442858 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442884 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.442905 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4mbr\" (UniqueName: \"kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.445537 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.469276 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.481246 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.481624 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.485433 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c8bp\" (UniqueName: \"kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp\") pod \"placement-db-sync-4n48c\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.516809 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547074 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547125 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547180 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547205 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547240 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4mbr\" (UniqueName: \"kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547279 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547330 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547355 4647 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547379 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547435 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547453 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.547498 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qcl6\" (UniqueName: \"kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.549062 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.554108 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.555978 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.555987 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.561376 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.571066 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4n48c" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.574373 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4mbr\" (UniqueName: \"kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr\") pod \"cinder-db-sync-p265g\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.618964 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.652841 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.652882 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.652907 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.652979 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qcl6\" (UniqueName: \"kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.653004 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.653021 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.653890 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: 
\"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.654606 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.654815 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.655053 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-p265g" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.655325 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.656351 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.676328 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qcl6\" (UniqueName: \"kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6\") pod \"dnsmasq-dns-fcfdd6f9f-gmvlv\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.751968 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.804980 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-njlfg"] Nov 28 15:42:26 crc kubenswrapper[4647]: I1128 15:42:26.830281 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:42:26 crc kubenswrapper[4647]: W1128 15:42:26.935862 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod332c2c0e_dd01_4aad_8bb6_d1b9ef056cc2.slice/crio-b75fd2003aa723c41dd6c8fc8661836bb5b3a6ede97c2fadbe036bec2deff1aa WatchSource:0}: Error finding container b75fd2003aa723c41dd6c8fc8661836bb5b3a6ede97c2fadbe036bec2deff1aa: Status 404 returned error can't find the container with id b75fd2003aa723c41dd6c8fc8661836bb5b3a6ede97c2fadbe036bec2deff1aa Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.000515 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.177486 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2","Type":"ContainerStarted","Data":"b75fd2003aa723c41dd6c8fc8661836bb5b3a6ede97c2fadbe036bec2deff1aa"} Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.180555 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" event={"ID":"c236552c-832d-4ff3-89d9-9150a61612ea","Type":"ContainerStarted","Data":"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf"} Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.180643 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" event={"ID":"c236552c-832d-4ff3-89d9-9150a61612ea","Type":"ContainerStarted","Data":"fce37d12e496d955e996d0da693d6d384769cc4588f1af8433f9db65f3783562"} Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.180755 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" podUID="c236552c-832d-4ff3-89d9-9150a61612ea" containerName="init" containerID="cri-o://38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf" gracePeriod=10 Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.185487 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5775b4c989-4jknn" event={"ID":"2648dbdb-8c59-484b-bc7e-303dbbbf4b15","Type":"ContainerStarted","Data":"e6dedd5c35bf4e8a77f4092c1a8284f7df1bc1c2247349b8194dc7e45bb2c666"} Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.190871 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-njlfg" event={"ID":"6c9694ad-a36c-4cb6-ae82-a5354daf77fc","Type":"ContainerStarted","Data":"a710193f7fd0738fc576105f3e5eeb653a322dd55a893cfaec0a4c82ff2f309a"} Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.273834 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-njlfg" podStartSLOduration=2.27381109 podStartE2EDuration="2.27381109s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:27.24266388 +0000 UTC m=+1077.090270301" watchObservedRunningTime="2025-11-28 15:42:27.27381109 +0000 UTC m=+1077.121417501" Nov 28 
15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.328274 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.414352 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4n48c"] Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.569152 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:42:27 crc kubenswrapper[4647]: I1128 15:42:27.607242 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-p265g"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.130429 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.204677 4647 generic.go:334] "Generic (PLEG): container finished" podID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerID="466e3045459719c468f1ee6316f7cc507d42efec36a2e7ca4a2a9cb8fe70cd4e" exitCode=0 Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.206017 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" event={"ID":"836b0825-d6e3-4fd9-9a1c-34672a0c543c","Type":"ContainerDied","Data":"466e3045459719c468f1ee6316f7cc507d42efec36a2e7ca4a2a9cb8fe70cd4e"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.206054 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" event={"ID":"836b0825-d6e3-4fd9-9a1c-34672a0c543c","Type":"ContainerStarted","Data":"7c0ebc76d28b6f59e4206eca42d8fba1664a1d87f01ebca00eca808c69bc1701"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.212577 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4n48c" event={"ID":"fb420869-c42b-47a6-8e22-c4e263d9a666","Type":"ContainerStarted","Data":"fcdbc55e5a093e0dc3917732bef1c4faaf0363e90f224be0facdb493319e0d65"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.241625 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-njlfg" event={"ID":"6c9694ad-a36c-4cb6-ae82-a5354daf77fc","Type":"ContainerStarted","Data":"85fa52db2ce9a76d2340a5988cec119e28bd70c88e3919e7ab94006773523f4b"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.266465 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p265g" event={"ID":"4f54b294-79b2-4097-9011-f094f66cc705","Type":"ContainerStarted","Data":"c05262de4a7a039b884495f50bf3d452de408287d52ff65ae9959f176f2d38b6"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.268892 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-685658f7df-nxgvv" event={"ID":"d84941bb-fa7d-434b-87d7-cd6a65cddcac","Type":"ContainerStarted","Data":"21e7d84616e936a50745b340f1ff13b56bfea35477d5769256312c7ee3f58490"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.274606 4647 generic.go:334] "Generic (PLEG): container finished" podID="c236552c-832d-4ff3-89d9-9150a61612ea" containerID="38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf" exitCode=0 Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.274814 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" event={"ID":"c236552c-832d-4ff3-89d9-9150a61612ea","Type":"ContainerDied","Data":"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf"} Nov 28 15:42:28 crc kubenswrapper[4647]: 
I1128 15:42:28.274888 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" event={"ID":"c236552c-832d-4ff3-89d9-9150a61612ea","Type":"ContainerDied","Data":"fce37d12e496d955e996d0da693d6d384769cc4588f1af8433f9db65f3783562"} Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.274948 4647 scope.go:117] "RemoveContainer" containerID="38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.275161 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-b77l6" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320359 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320470 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92s4z\" (UniqueName: \"kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320505 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320532 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320634 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.320662 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb\") pod \"c236552c-832d-4ff3-89d9-9150a61612ea\" (UID: \"c236552c-832d-4ff3-89d9-9150a61612ea\") " Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.360218 4647 scope.go:117] "RemoveContainer" containerID="38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.360490 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: E1128 15:42:28.360998 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf\": container with ID starting with 38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf not found: ID does not exist" containerID="38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.361023 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf"} err="failed to get container status \"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf\": rpc error: code = NotFound desc = could not find container \"38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf\": container with ID starting with 38212e5e9db9bdf01450baccec8a4b7bf87068654f13f94164d0601f866015cf not found: ID does not exist" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.376575 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z" (OuterVolumeSpecName: "kube-api-access-92s4z") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "kube-api-access-92s4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.405442 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.423734 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.423765 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92s4z\" (UniqueName: \"kubernetes.io/projected/c236552c-832d-4ff3-89d9-9150a61612ea-kube-api-access-92s4z\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.423777 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.423958 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.434697 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config" (OuterVolumeSpecName: "config") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.439002 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c236552c-832d-4ff3-89d9-9150a61612ea" (UID: "c236552c-832d-4ff3-89d9-9150a61612ea"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.525669 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.525704 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.525715 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c236552c-832d-4ff3-89d9-9150a61612ea-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.565050 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.621001 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:42:28 crc kubenswrapper[4647]: E1128 15:42:28.621426 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c236552c-832d-4ff3-89d9-9150a61612ea" containerName="init" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.621441 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c236552c-832d-4ff3-89d9-9150a61612ea" containerName="init" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.621642 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c236552c-832d-4ff3-89d9-9150a61612ea" containerName="init" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.622571 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.694478 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.728356 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.728404 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.728449 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.728526 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.728551 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qj2h\" (UniqueName: \"kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.786309 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.808342 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.830488 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-b77l6"] Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.831703 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.831816 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.831846 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-5qj2h\" (UniqueName: \"kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.831899 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.831925 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.832677 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.833482 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.834298 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.837188 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.916127 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qj2h\" (UniqueName: \"kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h\") pod \"horizon-5d9696645-7j2gj\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:28 crc kubenswrapper[4647]: I1128 15:42:28.999669 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:42:29 crc kubenswrapper[4647]: I1128 15:42:29.510567 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:42:29 crc kubenswrapper[4647]: W1128 15:42:29.529164 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12316c5a_0c4f_453b_85e5_4016ac495dd6.slice/crio-66adde51c32eff915c0be126a5551c3bf99f2e167785e5d4135a29801b837579 WatchSource:0}: Error finding container 66adde51c32eff915c0be126a5551c3bf99f2e167785e5d4135a29801b837579: Status 404 returned error can't find the container with id 66adde51c32eff915c0be126a5551c3bf99f2e167785e5d4135a29801b837579 Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.365917 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" event={"ID":"836b0825-d6e3-4fd9-9a1c-34672a0c543c","Type":"ContainerStarted","Data":"f29bee434829a4fff361ae7c67268f4684a89f6d65e41de7152c6dad108b5945"} Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.366804 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.386694 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d9696645-7j2gj" event={"ID":"12316c5a-0c4f-453b-85e5-4016ac495dd6","Type":"ContainerStarted","Data":"66adde51c32eff915c0be126a5551c3bf99f2e167785e5d4135a29801b837579"} Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.402194 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" podStartSLOduration=4.40215955 podStartE2EDuration="4.40215955s" podCreationTimestamp="2025-11-28 15:42:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:30.387042463 +0000 UTC m=+1080.234648884" watchObservedRunningTime="2025-11-28 15:42:30.40215955 +0000 UTC m=+1080.249765971" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.489384 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c236552c-832d-4ff3-89d9-9150a61612ea" path="/var/lib/kubelet/pods/c236552c-832d-4ff3-89d9-9150a61612ea/volumes" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.781513 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-n92kx"] Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.782643 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.785467 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.785697 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-p9vz4" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.799031 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-n92kx"] Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.892394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt8b9\" (UniqueName: \"kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.892821 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:30 crc kubenswrapper[4647]: I1128 15:42:30.892863 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.007949 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt8b9\" (UniqueName: \"kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.008003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.008028 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.019635 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.027833 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt8b9\" (UniqueName: 
\"kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.039009 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle\") pod \"barbican-db-sync-n92kx\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.092875 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-s6szl"] Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.095503 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.100190 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-x6c45" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.100374 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.101872 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.121713 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-n92kx" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.135718 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-s6szl"] Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.216892 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.216966 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjtv6\" (UniqueName: \"kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.217192 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.322162 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.322239 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjtv6\" (UniqueName: 
\"kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.322282 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.333258 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.375303 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjtv6\" (UniqueName: \"kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.393352 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle\") pod \"neutron-db-sync-s6szl\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.468861 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-s6szl" Nov 28 15:42:31 crc kubenswrapper[4647]: I1128 15:42:31.903690 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-n92kx"] Nov 28 15:42:31 crc kubenswrapper[4647]: W1128 15:42:31.921510 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc34bccdb_127a_40c0_ac61_6e2f354a6c6d.slice/crio-ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108 WatchSource:0}: Error finding container ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108: Status 404 returned error can't find the container with id ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108 Nov 28 15:42:32 crc kubenswrapper[4647]: I1128 15:42:32.361543 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-s6szl"] Nov 28 15:42:32 crc kubenswrapper[4647]: W1128 15:42:32.380684 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67922fe2_4c18_4d43_8d80_ede2c34cb2c6.slice/crio-301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149 WatchSource:0}: Error finding container 301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149: Status 404 returned error can't find the container with id 301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149 Nov 28 15:42:32 crc kubenswrapper[4647]: I1128 15:42:32.427877 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s6szl" event={"ID":"67922fe2-4c18-4d43-8d80-ede2c34cb2c6","Type":"ContainerStarted","Data":"301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149"} Nov 28 
15:42:32 crc kubenswrapper[4647]: I1128 15:42:32.441705 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n92kx" event={"ID":"c34bccdb-127a-40c0-ac61-6e2f354a6c6d","Type":"ContainerStarted","Data":"ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108"} Nov 28 15:42:33 crc kubenswrapper[4647]: I1128 15:42:33.472521 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s6szl" event={"ID":"67922fe2-4c18-4d43-8d80-ede2c34cb2c6","Type":"ContainerStarted","Data":"dabdd852a15e2df9199eb64966465f6cdb6cfedcda7a23116dd81ba11d668988"} Nov 28 15:42:33 crc kubenswrapper[4647]: I1128 15:42:33.506564 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-s6szl" podStartSLOduration=2.5065473000000003 podStartE2EDuration="2.5065473s" podCreationTimestamp="2025-11-28 15:42:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:33.506002056 +0000 UTC m=+1083.353608477" watchObservedRunningTime="2025-11-28 15:42:33.5065473 +0000 UTC m=+1083.354153711" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.701485 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.757663 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.759162 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.768126 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.798951 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.816829 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xxwc\" (UniqueName: \"kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.816906 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.816930 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.816947 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key\") pod \"horizon-84947f5948-ml477\" (UID: 
\"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.817057 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.817093 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.817111 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918317 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xxwc\" (UniqueName: \"kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918401 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918444 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918462 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918537 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918570 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 
15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.918589 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.919360 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.920530 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.920615 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.926538 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.930124 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.958056 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:34 crc kubenswrapper[4647]: I1128 15:42:34.965587 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xxwc\" (UniqueName: \"kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc\") pod \"horizon-84947f5948-ml477\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.052379 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.089532 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-66c6c46cdb-xgv7h"] Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.092080 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.104524 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.123991 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-config-data\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124091 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-combined-ca-bundle\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124170 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-secret-key\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124237 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmjlm\" (UniqueName: \"kubernetes.io/projected/278aef39-0aaf-4d33-b167-0f0cca8248fd-kube-api-access-tmjlm\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124269 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-tls-certs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124311 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-scripts\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.124362 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/278aef39-0aaf-4d33-b167-0f0cca8248fd-logs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.147660 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66c6c46cdb-xgv7h"] Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.227609 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-combined-ca-bundle\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " 
pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.228644 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-secret-key\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.228790 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmjlm\" (UniqueName: \"kubernetes.io/projected/278aef39-0aaf-4d33-b167-0f0cca8248fd-kube-api-access-tmjlm\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.228977 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-tls-certs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.230713 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-scripts\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.230908 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/278aef39-0aaf-4d33-b167-0f0cca8248fd-logs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.231143 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-config-data\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.233088 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/278aef39-0aaf-4d33-b167-0f0cca8248fd-logs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.233095 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-scripts\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.235774 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/278aef39-0aaf-4d33-b167-0f0cca8248fd-config-data\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.235895 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-tls-certs\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.238878 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-horizon-secret-key\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.250484 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278aef39-0aaf-4d33-b167-0f0cca8248fd-combined-ca-bundle\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.255644 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmjlm\" (UniqueName: \"kubernetes.io/projected/278aef39-0aaf-4d33-b167-0f0cca8248fd-kube-api-access-tmjlm\") pod \"horizon-66c6c46cdb-xgv7h\" (UID: \"278aef39-0aaf-4d33-b167-0f0cca8248fd\") " pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.425369 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.512599 4647 generic.go:334] "Generic (PLEG): container finished" podID="6c9694ad-a36c-4cb6-ae82-a5354daf77fc" containerID="85fa52db2ce9a76d2340a5988cec119e28bd70c88e3919e7ab94006773523f4b" exitCode=0 Nov 28 15:42:35 crc kubenswrapper[4647]: I1128 15:42:35.512642 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-njlfg" event={"ID":"6c9694ad-a36c-4cb6-ae82-a5354daf77fc","Type":"ContainerDied","Data":"85fa52db2ce9a76d2340a5988cec119e28bd70c88e3919e7ab94006773523f4b"} Nov 28 15:42:36 crc kubenswrapper[4647]: I1128 15:42:36.754658 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:42:36 crc kubenswrapper[4647]: I1128 15:42:36.852392 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:42:36 crc kubenswrapper[4647]: I1128 15:42:36.852696 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" containerID="cri-o://ad0e0b2e3a3c83519f8a6aa7dcae530de651e0115c582b271c06fdb7f4eb07e2" gracePeriod=10 Nov 28 15:42:37 crc kubenswrapper[4647]: I1128 15:42:37.542739 4647 generic.go:334] "Generic (PLEG): container finished" podID="0a477d1b-0255-4aef-8415-5297f93df84b" containerID="ad0e0b2e3a3c83519f8a6aa7dcae530de651e0115c582b271c06fdb7f4eb07e2" exitCode=0 Nov 28 15:42:37 crc kubenswrapper[4647]: I1128 15:42:37.543193 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" event={"ID":"0a477d1b-0255-4aef-8415-5297f93df84b","Type":"ContainerDied","Data":"ad0e0b2e3a3c83519f8a6aa7dcae530de651e0115c582b271c06fdb7f4eb07e2"} Nov 28 15:42:40 crc kubenswrapper[4647]: I1128 15:42:40.580524 4647 generic.go:334] "Generic (PLEG): container finished" podID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" 
containerID="0d1ba4f54393cc1a7d2d80a35761c9939500729559ff14ca733d9569bb273beb" exitCode=0 Nov 28 15:42:40 crc kubenswrapper[4647]: I1128 15:42:40.581004 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9gx56" event={"ID":"b5b74ca6-04a9-46a3-8aa2-658580db07c0","Type":"ContainerDied","Data":"0d1ba4f54393cc1a7d2d80a35761c9939500729559ff14ca733d9569bb273beb"} Nov 28 15:42:40 crc kubenswrapper[4647]: I1128 15:42:40.594802 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: connect: connection refused" Nov 28 15:42:44 crc kubenswrapper[4647]: E1128 15:42:44.285973 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 28 15:42:44 crc kubenswrapper[4647]: E1128 15:42:44.286938 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67fh54h657h55h64fh66ch699h655h89h576h54fh5d9h5f5h659hbbh5f7h595h7fh67dh5c5hb7h9dh64dhcch9ch549hbdhbh664h5d4h586h97q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zwg9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ceilometer-0_openstack(332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.482333 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.483175 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9gx56" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.565995 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68z7k\" (UniqueName: \"kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566130 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566245 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566319 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566355 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle\") pod \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566393 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsdsx\" (UniqueName: \"kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx\") pod \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566445 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566535 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data\") pod \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566593 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" 
(UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys\") pod \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\" (UID: \"6c9694ad-a36c-4cb6-ae82-a5354daf77fc\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.566704 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data\") pod \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\" (UID: \"b5b74ca6-04a9-46a3-8aa2-658580db07c0\") " Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.586362 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts" (OuterVolumeSpecName: "scripts") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.586543 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b5b74ca6-04a9-46a3-8aa2-658580db07c0" (UID: "b5b74ca6-04a9-46a3-8aa2-658580db07c0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.601738 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.602501 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx" (OuterVolumeSpecName: "kube-api-access-rsdsx") pod "b5b74ca6-04a9-46a3-8aa2-658580db07c0" (UID: "b5b74ca6-04a9-46a3-8aa2-658580db07c0"). InnerVolumeSpecName "kube-api-access-rsdsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.606020 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k" (OuterVolumeSpecName: "kube-api-access-68z7k") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). InnerVolumeSpecName "kube-api-access-68z7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.617064 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.630553 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data" (OuterVolumeSpecName: "config-data") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.633680 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c9694ad-a36c-4cb6-ae82-a5354daf77fc" (UID: "6c9694ad-a36c-4cb6-ae82-a5354daf77fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.634805 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-njlfg" event={"ID":"6c9694ad-a36c-4cb6-ae82-a5354daf77fc","Type":"ContainerDied","Data":"a710193f7fd0738fc576105f3e5eeb653a322dd55a893cfaec0a4c82ff2f309a"} Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.634922 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a710193f7fd0738fc576105f3e5eeb653a322dd55a893cfaec0a4c82ff2f309a" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.635083 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-njlfg" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.636757 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5b74ca6-04a9-46a3-8aa2-658580db07c0" (UID: "b5b74ca6-04a9-46a3-8aa2-658580db07c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.645894 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9gx56" event={"ID":"b5b74ca6-04a9-46a3-8aa2-658580db07c0","Type":"ContainerDied","Data":"e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba"} Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.645951 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0db575e3931b54988f1bcc25738584bb5def1222468ca2bd3a47358827622ba" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.646071 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9gx56" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.666913 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data" (OuterVolumeSpecName: "config-data") pod "b5b74ca6-04a9-46a3-8aa2-658580db07c0" (UID: "b5b74ca6-04a9-46a3-8aa2-658580db07c0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669106 4647 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669214 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsdsx\" (UniqueName: \"kubernetes.io/projected/b5b74ca6-04a9-46a3-8aa2-658580db07c0-kube-api-access-rsdsx\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669299 4647 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669365 4647 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669455 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669568 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68z7k\" (UniqueName: \"kubernetes.io/projected/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-kube-api-access-68z7k\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669651 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669703 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669775 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9694ad-a36c-4cb6-ae82-a5354daf77fc-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:44 crc kubenswrapper[4647]: I1128 15:42:44.669825 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b74ca6-04a9-46a3-8aa2-658580db07c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.652307 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-njlfg"] Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.669343 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-njlfg"] Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.745883 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6k9fd"] Nov 28 15:42:45 crc kubenswrapper[4647]: E1128 15:42:45.746618 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" containerName="glance-db-sync" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.746632 4647 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" containerName="glance-db-sync" Nov 28 15:42:45 crc kubenswrapper[4647]: E1128 15:42:45.746679 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9694ad-a36c-4cb6-ae82-a5354daf77fc" containerName="keystone-bootstrap" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.746686 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9694ad-a36c-4cb6-ae82-a5354daf77fc" containerName="keystone-bootstrap" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.746858 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" containerName="glance-db-sync" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.746878 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c9694ad-a36c-4cb6-ae82-a5354daf77fc" containerName="keystone-bootstrap" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.747800 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.751324 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.751363 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-7j874" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.751522 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.754776 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.762867 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6k9fd"] Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.791962 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.792030 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.792080 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.792109 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.792132 4647 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pzbl\" (UniqueName: \"kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.792170 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.893992 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.894063 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.894108 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.894135 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.894165 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pzbl\" (UniqueName: \"kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.894204 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.911626 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.911828 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.917680 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.927479 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pzbl\" (UniqueName: \"kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.927550 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:45 crc kubenswrapper[4647]: I1128 15:42:45.933062 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys\") pod \"keystone-bootstrap-6k9fd\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.115790 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.395211 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"] Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.397602 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.450297 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c9694ad-a36c-4cb6-ae82-a5354daf77fc" path="/var/lib/kubelet/pods/6c9694ad-a36c-4cb6-ae82-a5354daf77fc/volumes" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.472486 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"] Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508031 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508202 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508277 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508309 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508347 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.508392 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvp5\" (UniqueName: \"kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.611997 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.612098 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.612124 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.612149 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.612174 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvp5\" (UniqueName: \"kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.612227 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.614044 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.614839 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.616575 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.616756 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.624605 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" 
(UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.643437 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnvp5\" (UniqueName: \"kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5\") pod \"dnsmasq-dns-57c957c4ff-b2jxs\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") " pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:46 crc kubenswrapper[4647]: I1128 15:42:46.737126 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.023051 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.023146 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.150126 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.154570 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.160537 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.190213 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.190225 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.190565 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-dp9jz" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331573 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331649 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331741 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmfdl\" (UniqueName: \"kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl\") pod \"glance-default-external-api-0\" 
(UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331783 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331808 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331839 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.331871 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434268 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmfdl\" (UniqueName: \"kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434357 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434381 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434421 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434450 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434488 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.434524 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.435183 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.435232 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.435723 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.439838 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.443882 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.445752 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.455536 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmfdl\" (UniqueName: \"kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 
15:42:47.486554 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.510237 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.934259 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.938287 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.945037 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:42:47 crc kubenswrapper[4647]: I1128 15:42:47.948008 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.049339 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.049742 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.049896 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.050038 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.050192 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.050307 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d97bs\" (UniqueName: \"kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs\") pod \"glance-default-internal-api-0\" (UID: 
\"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.050491 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.151882 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d97bs\" (UniqueName: \"kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.152155 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.152254 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.152331 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.152740 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.152902 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.153003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.153268 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc 
kubenswrapper[4647]: I1128 15:42:48.153681 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.154181 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.163507 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.170484 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.172446 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.188387 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d97bs\" (UniqueName: \"kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.230921 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:48 crc kubenswrapper[4647]: I1128 15:42:48.259851 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:50 crc kubenswrapper[4647]: I1128 15:42:50.160600 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:42:50 crc kubenswrapper[4647]: I1128 15:42:50.251705 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:50 crc kubenswrapper[4647]: I1128 15:42:50.594897 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.878111 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.878353 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d8h647h59fh54fh9ch76hcdh58bh5fch689h594h56dh659h59dh5d4h84h57bh67fh544h68h555h57fhcch54ch5f4h5d4h69hd6hf4h544h5d8h589q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wc62g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5775b4c989-4jknn_openstack(2648dbdb-8c59-484b-bc7e-303dbbbf4b15): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.881323 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5775b4c989-4jknn" podUID="2648dbdb-8c59-484b-bc7e-303dbbbf4b15" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.891631 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.892050 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n655h97h5b8hf4hf5hd8h54fh64h587h86h9ch67fh585hbdhc5h659h597hcdh576hbh5d9h689h58dhd4h54ch666h594h88hbh644hd9h5bcq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-psxg5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-685658f7df-nxgvv_openstack(d84941bb-fa7d-434b-87d7-cd6a65cddcac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:42:52 crc kubenswrapper[4647]: E1128 15:42:52.897857 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-685658f7df-nxgvv" podUID="d84941bb-fa7d-434b-87d7-cd6a65cddcac" Nov 28 15:42:55 crc kubenswrapper[4647]: I1128 15:42:55.596305 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Nov 28 15:42:55 crc kubenswrapper[4647]: I1128 15:42:55.596865 
4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:43:00 crc kubenswrapper[4647]: I1128 15:43:00.597059 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Nov 28 15:43:05 crc kubenswrapper[4647]: I1128 15:43:05.598305 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Nov 28 15:43:10 crc kubenswrapper[4647]: E1128 15:43:10.086486 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 28 15:43:10 crc kubenswrapper[4647]: E1128 15:43:10.087840 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n586hbdh649h5d5h66ch64h676h65dhdbh56ch78h6fh5d5hcfh5c4h5dbh5b7h66chffh5cbh644h68dh9bh588h647h5c9h67fh56ch5b4h5d4hc7h56dq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5qj2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5d9696645-7j2gj_openstack(12316c5a-0c4f-453b-85e5-4016ac495dd6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:43:10 crc kubenswrapper[4647]: E1128 15:43:10.093261 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5d9696645-7j2gj" podUID="12316c5a-0c4f-453b-85e5-4016ac495dd6" Nov 28 15:43:10 crc kubenswrapper[4647]: I1128 15:43:10.600340 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Nov 28 15:43:11 crc kubenswrapper[4647]: E1128 15:43:11.914364 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 28 15:43:11 crc kubenswrapper[4647]: E1128 15:43:11.916006 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6c8bp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-4n48c_openstack(fb420869-c42b-47a6-8e22-c4e263d9a666): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:43:11 crc kubenswrapper[4647]: E1128 15:43:11.917224 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-4n48c" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" Nov 28 
15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.027561 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" event={"ID":"0a477d1b-0255-4aef-8415-5297f93df84b","Type":"ContainerDied","Data":"7cf9506a2b93a7b19ab834ba59b59dd45b9aee005fbe651b6503f6694eddbdc0"} Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.027621 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cf9506a2b93a7b19ab834ba59b59dd45b9aee005fbe651b6503f6694eddbdc0" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.029776 4647 generic.go:334] "Generic (PLEG): container finished" podID="67922fe2-4c18-4d43-8d80-ede2c34cb2c6" containerID="dabdd852a15e2df9199eb64966465f6cdb6cfedcda7a23116dd81ba11d668988" exitCode=0 Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.029849 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s6szl" event={"ID":"67922fe2-4c18-4d43-8d80-ede2c34cb2c6","Type":"ContainerDied","Data":"dabdd852a15e2df9199eb64966465f6cdb6cfedcda7a23116dd81ba11d668988"} Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.031395 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-685658f7df-nxgvv" event={"ID":"d84941bb-fa7d-434b-87d7-cd6a65cddcac","Type":"ContainerDied","Data":"21e7d84616e936a50745b340f1ff13b56bfea35477d5769256312c7ee3f58490"} Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.031440 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21e7d84616e936a50745b340f1ff13b56bfea35477d5769256312c7ee3f58490" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.033759 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5775b4c989-4jknn" event={"ID":"2648dbdb-8c59-484b-bc7e-303dbbbf4b15","Type":"ContainerDied","Data":"e6dedd5c35bf4e8a77f4092c1a8284f7df1bc1c2247349b8194dc7e45bb2c666"} Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.033811 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6dedd5c35bf4e8a77f4092c1a8284f7df1bc1c2247349b8194dc7e45bb2c666" Nov 28 15:43:12 crc kubenswrapper[4647]: E1128 15:43:12.036536 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-4n48c" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.066855 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.072158 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.083408 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190028 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190151 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190210 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wc62g\" (UniqueName: \"kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g\") pod \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190284 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190315 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190387 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190459 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts\") pod \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.190514 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs\") pod \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.191185 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts" (OuterVolumeSpecName: "scripts") pod "d84941bb-fa7d-434b-87d7-cd6a65cddcac" (UID: "d84941bb-fa7d-434b-87d7-cd6a65cddcac"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.191766 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs" (OuterVolumeSpecName: "logs") pod "2648dbdb-8c59-484b-bc7e-303dbbbf4b15" (UID: "2648dbdb-8c59-484b-bc7e-303dbbbf4b15"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.191815 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxxzl\" (UniqueName: \"kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl\") pod \"0a477d1b-0255-4aef-8415-5297f93df84b\" (UID: \"0a477d1b-0255-4aef-8415-5297f93df84b\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.191926 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts\") pod \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.191977 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data\") pod \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192058 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data\") pod \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192089 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key\") pod \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\" (UID: \"2648dbdb-8c59-484b-bc7e-303dbbbf4b15\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192145 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psxg5\" (UniqueName: \"kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5\") pod \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192189 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key\") pod \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192209 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs\") pod \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\" (UID: \"d84941bb-fa7d-434b-87d7-cd6a65cddcac\") " Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192801 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 
15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.192822 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.194753 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts" (OuterVolumeSpecName: "scripts") pod "2648dbdb-8c59-484b-bc7e-303dbbbf4b15" (UID: "2648dbdb-8c59-484b-bc7e-303dbbbf4b15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.195480 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data" (OuterVolumeSpecName: "config-data") pod "d84941bb-fa7d-434b-87d7-cd6a65cddcac" (UID: "d84941bb-fa7d-434b-87d7-cd6a65cddcac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.196275 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data" (OuterVolumeSpecName: "config-data") pod "2648dbdb-8c59-484b-bc7e-303dbbbf4b15" (UID: "2648dbdb-8c59-484b-bc7e-303dbbbf4b15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.201232 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2648dbdb-8c59-484b-bc7e-303dbbbf4b15" (UID: "2648dbdb-8c59-484b-bc7e-303dbbbf4b15"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.205199 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g" (OuterVolumeSpecName: "kube-api-access-wc62g") pod "2648dbdb-8c59-484b-bc7e-303dbbbf4b15" (UID: "2648dbdb-8c59-484b-bc7e-303dbbbf4b15"). InnerVolumeSpecName "kube-api-access-wc62g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.205814 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl" (OuterVolumeSpecName: "kube-api-access-hxxzl") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "kube-api-access-hxxzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.206140 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs" (OuterVolumeSpecName: "logs") pod "d84941bb-fa7d-434b-87d7-cd6a65cddcac" (UID: "d84941bb-fa7d-434b-87d7-cd6a65cddcac"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.209836 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5" (OuterVolumeSpecName: "kube-api-access-psxg5") pod "d84941bb-fa7d-434b-87d7-cd6a65cddcac" (UID: "d84941bb-fa7d-434b-87d7-cd6a65cddcac"). InnerVolumeSpecName "kube-api-access-psxg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.214998 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d84941bb-fa7d-434b-87d7-cd6a65cddcac" (UID: "d84941bb-fa7d-434b-87d7-cd6a65cddcac"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.255530 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.260562 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.261643 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.263970 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config" (OuterVolumeSpecName: "config") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.268056 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0a477d1b-0255-4aef-8415-5297f93df84b" (UID: "0a477d1b-0255-4aef-8415-5297f93df84b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294371 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294401 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294423 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxxzl\" (UniqueName: \"kubernetes.io/projected/0a477d1b-0255-4aef-8415-5297f93df84b-kube-api-access-hxxzl\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294437 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294447 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d84941bb-fa7d-434b-87d7-cd6a65cddcac-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294455 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294462 4647 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294472 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psxg5\" (UniqueName: \"kubernetes.io/projected/d84941bb-fa7d-434b-87d7-cd6a65cddcac-kube-api-access-psxg5\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294482 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d84941bb-fa7d-434b-87d7-cd6a65cddcac-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294489 4647 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d84941bb-fa7d-434b-87d7-cd6a65cddcac-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294499 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294507 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc kubenswrapper[4647]: I1128 15:43:12.294515 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wc62g\" (UniqueName: \"kubernetes.io/projected/2648dbdb-8c59-484b-bc7e-303dbbbf4b15-kube-api-access-wc62g\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:12 crc 
kubenswrapper[4647]: I1128 15:43:12.294524 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a477d1b-0255-4aef-8415-5297f93df84b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.045090 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-685658f7df-nxgvv" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.045106 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.045157 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5775b4c989-4jknn" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.129055 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.138667 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5775b4c989-4jknn"] Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.158668 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.170900 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-67qmp"] Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.194785 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.208476 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-685658f7df-nxgvv"] Nov 28 15:43:13 crc kubenswrapper[4647]: E1128 15:43:13.554595 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 28 15:43:13 crc kubenswrapper[4647]: E1128 15:43:13.555148 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h4mbr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-p265g_openstack(4f54b294-79b2-4097-9011-f094f66cc705): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:43:13 crc kubenswrapper[4647]: E1128 15:43:13.556850 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-p265g" podUID="4f54b294-79b2-4097-9011-f094f66cc705" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.658330 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.667839 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-s6szl" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.768084 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs\") pod \"12316c5a-0c4f-453b-85e5-4016ac495dd6\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.768943 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qj2h\" (UniqueName: \"kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h\") pod \"12316c5a-0c4f-453b-85e5-4016ac495dd6\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769087 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data\") pod \"12316c5a-0c4f-453b-85e5-4016ac495dd6\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769168 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle\") pod \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769277 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config\") pod \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts\") pod \"12316c5a-0c4f-453b-85e5-4016ac495dd6\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769458 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjtv6\" (UniqueName: \"kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6\") pod \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\" (UID: \"67922fe2-4c18-4d43-8d80-ede2c34cb2c6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769651 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key\") pod \"12316c5a-0c4f-453b-85e5-4016ac495dd6\" (UID: \"12316c5a-0c4f-453b-85e5-4016ac495dd6\") " Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.769113 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs" (OuterVolumeSpecName: "logs") pod "12316c5a-0c4f-453b-85e5-4016ac495dd6" (UID: "12316c5a-0c4f-453b-85e5-4016ac495dd6"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.771629 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts" (OuterVolumeSpecName: "scripts") pod "12316c5a-0c4f-453b-85e5-4016ac495dd6" (UID: "12316c5a-0c4f-453b-85e5-4016ac495dd6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.771770 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data" (OuterVolumeSpecName: "config-data") pod "12316c5a-0c4f-453b-85e5-4016ac495dd6" (UID: "12316c5a-0c4f-453b-85e5-4016ac495dd6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.773970 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h" (OuterVolumeSpecName: "kube-api-access-5qj2h") pod "12316c5a-0c4f-453b-85e5-4016ac495dd6" (UID: "12316c5a-0c4f-453b-85e5-4016ac495dd6"). InnerVolumeSpecName "kube-api-access-5qj2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.776974 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6" (OuterVolumeSpecName: "kube-api-access-wjtv6") pod "67922fe2-4c18-4d43-8d80-ede2c34cb2c6" (UID: "67922fe2-4c18-4d43-8d80-ede2c34cb2c6"). InnerVolumeSpecName "kube-api-access-wjtv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.778116 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "12316c5a-0c4f-453b-85e5-4016ac495dd6" (UID: "12316c5a-0c4f-453b-85e5-4016ac495dd6"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.813104 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config" (OuterVolumeSpecName: "config") pod "67922fe2-4c18-4d43-8d80-ede2c34cb2c6" (UID: "67922fe2-4c18-4d43-8d80-ede2c34cb2c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.816620 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67922fe2-4c18-4d43-8d80-ede2c34cb2c6" (UID: "67922fe2-4c18-4d43-8d80-ede2c34cb2c6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873020 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873050 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873064 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873077 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12316c5a-0c4f-453b-85e5-4016ac495dd6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873087 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjtv6\" (UniqueName: \"kubernetes.io/projected/67922fe2-4c18-4d43-8d80-ede2c34cb2c6-kube-api-access-wjtv6\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873099 4647 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12316c5a-0c4f-453b-85e5-4016ac495dd6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873108 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12316c5a-0c4f-453b-85e5-4016ac495dd6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:13 crc kubenswrapper[4647]: I1128 15:43:13.873121 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qj2h\" (UniqueName: \"kubernetes.io/projected/12316c5a-0c4f-453b-85e5-4016ac495dd6-kube-api-access-5qj2h\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.036126 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified" Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.036362 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-notification-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67fh54h657h55h64fh66ch699h655h89h576h54fh5d9h5f5h659hbbh5f7h595h7fh67dh5c5hb7h9dh64dhcch9ch549hbdhbh664h5d4h586h97q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-notification-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zwg9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/notificationhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.055440 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d9696645-7j2gj" event={"ID":"12316c5a-0c4f-453b-85e5-4016ac495dd6","Type":"ContainerDied","Data":"66adde51c32eff915c0be126a5551c3bf99f2e167785e5d4135a29801b837579"} Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.055496 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d9696645-7j2gj" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.064887 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-s6szl" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.065592 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s6szl" event={"ID":"67922fe2-4c18-4d43-8d80-ede2c34cb2c6","Type":"ContainerDied","Data":"301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149"} Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.065655 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="301be7fd17dbee4077be366fc11a3c7f08f4da9f46817945ba48ef22aa224149" Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.086561 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-p265g" podUID="4f54b294-79b2-4097-9011-f094f66cc705" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.252863 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.303428 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.315700 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5d9696645-7j2gj"] Nov 28 15:43:14 crc kubenswrapper[4647]: W1128 15:43:14.329831 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod138caa60_71a7_49ba_9a82_42664b2b2276.slice/crio-d248e4b4608f30316e3dc85463f78bf90125565718729d2e80c09bd3dbba0470 WatchSource:0}: Error finding container d248e4b4608f30316e3dc85463f78bf90125565718729d2e80c09bd3dbba0470: Status 404 returned error can't find the container with id d248e4b4608f30316e3dc85463f78bf90125565718729d2e80c09bd3dbba0470 Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.389039 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"] Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.454544 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" path="/var/lib/kubelet/pods/0a477d1b-0255-4aef-8415-5297f93df84b/volumes" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.461366 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12316c5a-0c4f-453b-85e5-4016ac495dd6" path="/var/lib/kubelet/pods/12316c5a-0c4f-453b-85e5-4016ac495dd6/volumes" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.461821 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2648dbdb-8c59-484b-bc7e-303dbbbf4b15" path="/var/lib/kubelet/pods/2648dbdb-8c59-484b-bc7e-303dbbbf4b15/volumes" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.462273 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d84941bb-fa7d-434b-87d7-cd6a65cddcac" path="/var/lib/kubelet/pods/d84941bb-fa7d-434b-87d7-cd6a65cddcac/volumes" Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.487611 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-678f98b656-ftpzl"] Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.488100 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="init" Nov 28 15:43:14 crc kubenswrapper[4647]: 
I1128 15:43:14.488114 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="init"
Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.488131 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67922fe2-4c18-4d43-8d80-ede2c34cb2c6" containerName="neutron-db-sync"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.488138 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="67922fe2-4c18-4d43-8d80-ede2c34cb2c6" containerName="neutron-db-sync"
Nov 28 15:43:14 crc kubenswrapper[4647]: E1128 15:43:14.488153 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.488160 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.488326 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="67922fe2-4c18-4d43-8d80-ede2c34cb2c6" containerName="neutron-db-sync"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.488343 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.489294 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.494788 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.494998 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-x6c45"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.495116 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.498624 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.524827 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"]
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.533704 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.598102 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"]
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.619086 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.619144 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.619171 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kthb\" (UniqueName: \"kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.619232 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.619283 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.646507 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-678f98b656-ftpzl"]
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.735977 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.736788 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.736825 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.736868 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.736944 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.736980 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.737001 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.737020 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kthb\" (UniqueName: \"kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.737047 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.741324 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lwwt\" (UniqueName: \"kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.741373 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.756146 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.757246 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kthb\" (UniqueName: \"kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.757352 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.766382 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.766394 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs\") pod \"neutron-678f98b656-ftpzl\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846290 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846337 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846358 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846385 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846514 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lwwt\" (UniqueName: \"kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.846572 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.847573 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.848345 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.848387 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.849351 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.848921 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.886719 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lwwt\" (UniqueName: \"kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt\") pod \"dnsmasq-dns-5ccc5c4795-2xxj2\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.905881 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.907089 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"]
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.963287 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:14 crc kubenswrapper[4647]: I1128 15:43:14.964218 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66c6c46cdb-xgv7h"]
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.056646 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:43:15 crc kubenswrapper[4647]: W1128 15:43:15.115446 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod989faff9_98dc_4b76_b0bd_5c90aa13075b.slice/crio-b43cf5598e142f7076ca5e32bd54eb9b98264fea96e6a426b1ef5358319c3406 WatchSource:0}: Error finding container b43cf5598e142f7076ca5e32bd54eb9b98264fea96e6a426b1ef5358319c3406: Status 404 returned error can't find the container with id b43cf5598e142f7076ca5e32bd54eb9b98264fea96e6a426b1ef5358319c3406
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.116632 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerStarted","Data":"d248e4b4608f30316e3dc85463f78bf90125565718729d2e80c09bd3dbba0470"}
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.123122 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n92kx" event={"ID":"c34bccdb-127a-40c0-ac61-6e2f354a6c6d","Type":"ContainerStarted","Data":"8677705d8f0bedddf37e0a160a871511a7e394b111b5da45276d87698d685fcb"}
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.134483 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerStarted","Data":"60abfda378c901e9db12b7fae8670b6e59d7a2ac685b122b09c87e140c810396"}
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.142919 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-n92kx" podStartSLOduration=3.5464436790000002 podStartE2EDuration="45.142904942s" podCreationTimestamp="2025-11-28 15:42:30 +0000 UTC" firstStartedPulling="2025-11-28 15:42:31.923246486 +0000 UTC m=+1081.770852907" lastFinishedPulling="2025-11-28 15:43:13.519707739 +0000 UTC m=+1123.367314170" observedRunningTime="2025-11-28 15:43:15.136502884 +0000 UTC m=+1124.984109305" watchObservedRunningTime="2025-11-28 15:43:15.142904942 +0000 UTC m=+1124.990511363"
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.146356 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" event={"ID":"2853894d-6f8e-42c5-b7fb-8d4efb427915","Type":"ContainerStarted","Data":"94e1b2f0d45b4f530b3e94ca6ecbccb8f8defe30a828a4add4b4df7a17ecea52"}
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.197611 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6k9fd"]
Nov 28 15:43:15 crc kubenswrapper[4647]: W1128 15:43:15.220766 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75b1595c_cca5_4809_94cf_c1c0ec937c27.slice/crio-4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202 WatchSource:0}: Error finding container 4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202: Status 404 returned error can't find the container with id 4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.311973 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:43:15 crc kubenswrapper[4647]: W1128 15:43:15.371572 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ca92afd_9d04_4e91_be1e_69c1665a5d20.slice/crio-e7a281228dd5455cd68f5efece14c769bf38a97c07ab4d9e78deed3c7ae2d62f WatchSource:0}: Error finding container e7a281228dd5455cd68f5efece14c769bf38a97c07ab4d9e78deed3c7ae2d62f: Status 404 returned error can't find the container with id e7a281228dd5455cd68f5efece14c769bf38a97c07ab4d9e78deed3c7ae2d62f
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.605557 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-67qmp" podUID="0a477d1b-0255-4aef-8415-5297f93df84b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout"
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.721436 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-678f98b656-ftpzl"]
Nov 28 15:43:15 crc kubenswrapper[4647]: I1128 15:43:15.823990 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"]
Nov 28 15:43:15 crc kubenswrapper[4647]: W1128 15:43:15.879500 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7b93065_ae7a_4916_ac48_a672dd1048cb.slice/crio-80fb394bae4eb8a00bfc9c13c4630a41ac0273c9ce2baef353367912decc3576 WatchSource:0}: Error finding container 80fb394bae4eb8a00bfc9c13c4630a41ac0273c9ce2baef353367912decc3576: Status 404 returned error can't find the container with id 80fb394bae4eb8a00bfc9c13c4630a41ac0273c9ce2baef353367912decc3576
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.232382 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerStarted","Data":"b43cf5598e142f7076ca5e32bd54eb9b98264fea96e6a426b1ef5358319c3406"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.242002 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerStarted","Data":"e7a281228dd5455cd68f5efece14c769bf38a97c07ab4d9e78deed3c7ae2d62f"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.263801 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerStarted","Data":"420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.263845 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerStarted","Data":"c6ec93ef7a20443dde196786685a68b7f1c6de2613da04496b4f686034fa9120"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.269019 4647 generic.go:334] "Generic (PLEG): container finished" podID="2853894d-6f8e-42c5-b7fb-8d4efb427915" containerID="17330f3aff7ebb15f49e32319241fdbc27ab43d658dd4d0329ae22c13377b710" exitCode=0
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.269066 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" event={"ID":"2853894d-6f8e-42c5-b7fb-8d4efb427915","Type":"ContainerDied","Data":"17330f3aff7ebb15f49e32319241fdbc27ab43d658dd4d0329ae22c13377b710"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.283549 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerStarted","Data":"337a6f06547910f852193d96b2d302984067b5cc092cdb2280e405c74a139b4e"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.283615 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerStarted","Data":"64b235df86da853e7bafc731cad556fae249bb1f53e1740afe1e53fb1763ad97"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.287062 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerStarted","Data":"7563fe0020ecc9b1f60fa298d03bcd154ddcc5215012322fde02968b070b7939"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.287095 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerStarted","Data":"e7e058b0788eb4840c897ff9b317a410f7c28a2e2a3028d75cd74c345af2aa81"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.294801 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" event={"ID":"d7b93065-ae7a-4916-ac48-a672dd1048cb","Type":"ContainerStarted","Data":"80fb394bae4eb8a00bfc9c13c4630a41ac0273c9ce2baef353367912decc3576"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.310823 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-66c6c46cdb-xgv7h" podStartSLOduration=42.310806615 podStartE2EDuration="42.310806615s" podCreationTimestamp="2025-11-28 15:42:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:16.303993526 +0000 UTC m=+1126.151599947" watchObservedRunningTime="2025-11-28 15:43:16.310806615 +0000 UTC m=+1126.158413036"
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.316787 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6k9fd" event={"ID":"75b1595c-cca5-4809-94cf-c1c0ec937c27","Type":"ContainerStarted","Data":"0c7941f64ec343f785e561d3ff7461d27b9638e37faf8497e409296aacfa6710"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.316824 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6k9fd" event={"ID":"75b1595c-cca5-4809-94cf-c1c0ec937c27","Type":"ContainerStarted","Data":"4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202"}
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.358769 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-84947f5948-ml477" podStartSLOduration=41.670749743 podStartE2EDuration="42.358753127s" podCreationTimestamp="2025-11-28 15:42:34 +0000 UTC" firstStartedPulling="2025-11-28 15:43:14.331756218 +0000 UTC m=+1124.179362639" lastFinishedPulling="2025-11-28 15:43:15.019759602 +0000 UTC m=+1124.867366023" observedRunningTime="2025-11-28 15:43:16.35734658 +0000 UTC m=+1126.204953001" watchObservedRunningTime="2025-11-28 15:43:16.358753127 +0000 UTC m=+1126.206359548"
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.421373 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6k9fd" podStartSLOduration=31.421353534 podStartE2EDuration="31.421353534s" podCreationTimestamp="2025-11-28 15:42:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:16.391110428 +0000 UTC m=+1126.238716849" watchObservedRunningTime="2025-11-28 15:43:16.421353534 +0000 UTC m=+1126.268959955"
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.841275 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs"
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936001 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936051 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936123 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936151 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnvp5\" (UniqueName: \"kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936243 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.936263 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0\") pod \"2853894d-6f8e-42c5-b7fb-8d4efb427915\" (UID: \"2853894d-6f8e-42c5-b7fb-8d4efb427915\") "
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.982649 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5" (OuterVolumeSpecName: "kube-api-access-tnvp5") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "kube-api-access-tnvp5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:43:16 crc kubenswrapper[4647]: I1128 15:43:16.989705 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.003881 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.016847 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.022650 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.022707 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.022761 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.023514 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.023569 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80" gracePeriod=600
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.030992 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config" (OuterVolumeSpecName: "config") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.039003 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.039033 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnvp5\" (UniqueName: \"kubernetes.io/projected/2853894d-6f8e-42c5-b7fb-8d4efb427915-kube-api-access-tnvp5\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.039045 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.039053 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.039165 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.082453 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2853894d-6f8e-42c5-b7fb-8d4efb427915" (UID: "2853894d-6f8e-42c5-b7fb-8d4efb427915"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.142932 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2853894d-6f8e-42c5-b7fb-8d4efb427915-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.354946 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerStarted","Data":"edb702d5d503c1d5483b4444103c192c94ea73442817a3315be15bc40d48dc42"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.359021 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs" event={"ID":"2853894d-6f8e-42c5-b7fb-8d4efb427915","Type":"ContainerDied","Data":"94e1b2f0d45b4f530b3e94ca6ecbccb8f8defe30a828a4add4b4df7a17ecea52"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.359060 4647 scope.go:117] "RemoveContainer" containerID="17330f3aff7ebb15f49e32319241fdbc27ab43d658dd4d0329ae22c13377b710"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.359181 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-b2jxs"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.385925 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerStarted","Data":"533eafe7adce88e49f74eb427938363d9b928e13c2700cbed87f0eab78c3a711"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.386039 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-678f98b656-ftpzl"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.387613 4647 generic.go:334] "Generic (PLEG): container finished" podID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerID="64cbf551a77b35e8219e93268918ff9f69138aac8be2f00b220770a780108538" exitCode=0
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.387670 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" event={"ID":"d7b93065-ae7a-4916-ac48-a672dd1048cb","Type":"ContainerDied","Data":"64cbf551a77b35e8219e93268918ff9f69138aac8be2f00b220770a780108538"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.389808 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerStarted","Data":"03587fff3683e7c31f7c20e82c5a9ed1e15569b894b2c33a94823dd19cea5ecc"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.405087 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80" exitCode=0
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.405819 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80"}
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.425062 4647 scope.go:117] "RemoveContainer" containerID="074d2a9435958e2c7736d08961659c11b166c59191e27b818dd5c1f09fc03871"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.428986 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"]
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.439811 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-b2jxs"]
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.445749 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-678f98b656-ftpzl" podStartSLOduration=3.4457253899999998 podStartE2EDuration="3.44572539s" podCreationTimestamp="2025-11-28 15:43:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:17.444036136 +0000 UTC m=+1127.291642547" watchObservedRunningTime="2025-11-28 15:43:17.44572539 +0000 UTC m=+1127.293331811"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.627447 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-76d7749889-z87rt"]
Nov 28 15:43:17 crc kubenswrapper[4647]: E1128 15:43:17.627931 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2853894d-6f8e-42c5-b7fb-8d4efb427915" containerName="init"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.627948 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="2853894d-6f8e-42c5-b7fb-8d4efb427915" containerName="init"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.628157 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="2853894d-6f8e-42c5-b7fb-8d4efb427915" containerName="init"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.629206 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.643464 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.644444 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.658192 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-76d7749889-z87rt"]
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765563 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765635 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkrc4\" (UniqueName: \"kubernetes.io/projected/8e65fe60-2d61-4066-aed7-6e211c8f2096-kube-api-access-rkrc4\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765657 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-combined-ca-bundle\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765691 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-ovndb-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765750 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-httpd-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765785 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-internal-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.765811 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-public-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.867163 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkrc4\" (UniqueName: \"kubernetes.io/projected/8e65fe60-2d61-4066-aed7-6e211c8f2096-kube-api-access-rkrc4\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.867216 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-combined-ca-bundle\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.867253 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-ovndb-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.868019 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-httpd-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.868103 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-internal-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.868133 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-public-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.868159 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.875572 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-ovndb-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.877916 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-httpd-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.878503 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-public-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.888540 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-combined-ca-bundle\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.888781 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-config\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.891533 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkrc4\" (UniqueName: \"kubernetes.io/projected/8e65fe60-2d61-4066-aed7-6e211c8f2096-kube-api-access-rkrc4\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:17 crc kubenswrapper[4647]: I1128 15:43:17.913067 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e65fe60-2d61-4066-aed7-6e211c8f2096-internal-tls-certs\") pod \"neutron-76d7749889-z87rt\" (UID: \"8e65fe60-2d61-4066-aed7-6e211c8f2096\") " pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.006789 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.433980 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2853894d-6f8e-42c5-b7fb-8d4efb427915" path="/var/lib/kubelet/pods/2853894d-6f8e-42c5-b7fb-8d4efb427915/volumes"
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.459822 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" event={"ID":"d7b93065-ae7a-4916-ac48-a672dd1048cb","Type":"ContainerStarted","Data":"cd4cee753584194ed7fc31e2f6710ccbeede19870d6b88550eead7cbe9337dda"}
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.465320 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerStarted","Data":"8b8e5f2401540e9cf4370b6d81c3b3e9e256f4b6d5462c1793ad9c3be189a929"}
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.465571 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-log" containerID="cri-o://03587fff3683e7c31f7c20e82c5a9ed1e15569b894b2c33a94823dd19cea5ecc" gracePeriod=30
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.469033 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-httpd" containerID="cri-o://8b8e5f2401540e9cf4370b6d81c3b3e9e256f4b6d5462c1793ad9c3be189a929" gracePeriod=30
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.490529 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b"}
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.523941 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=32.523918491 podStartE2EDuration="32.523918491s" podCreationTimestamp="2025-11-28 15:42:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:18.52122701 +0000 UTC m=+1128.368833431" watchObservedRunningTime="2025-11-28 15:43:18.523918491 +0000 UTC m=+1128.371524912"
Nov 28 15:43:18 crc kubenswrapper[4647]: I1128 15:43:18.914381 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-76d7749889-z87rt"]
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.524573 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76d7749889-z87rt" event={"ID":"8e65fe60-2d61-4066-aed7-6e211c8f2096","Type":"ContainerStarted","Data":"f047c24b94d58f14516fe52d942859f0dc90623012aedc4b1510604db9d26dd9"}
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.541133 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerStarted","Data":"6b80d93bd89973e3884b1b26c9e02423767ed625bd64fff9a060eb7635e1f4bb"}
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.541320 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-log" containerID="cri-o://edb702d5d503c1d5483b4444103c192c94ea73442817a3315be15bc40d48dc42" gracePeriod=30
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.541830 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-httpd" containerID="cri-o://6b80d93bd89973e3884b1b26c9e02423767ed625bd64fff9a060eb7635e1f4bb" gracePeriod=30
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.573256 4647 generic.go:334] "Generic (PLEG): container finished" podID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerID="8b8e5f2401540e9cf4370b6d81c3b3e9e256f4b6d5462c1793ad9c3be189a929" exitCode=143
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.573293 4647 generic.go:334] "Generic (PLEG): container finished" podID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerID="03587fff3683e7c31f7c20e82c5a9ed1e15569b894b2c33a94823dd19cea5ecc" exitCode=143
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.574314 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerDied","Data":"8b8e5f2401540e9cf4370b6d81c3b3e9e256f4b6d5462c1793ad9c3be189a929"}
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.574349 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerDied","Data":"03587fff3683e7c31f7c20e82c5a9ed1e15569b894b2c33a94823dd19cea5ecc"}
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.574805 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2"
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.583168 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=33.583151925 podStartE2EDuration="33.583151925s" podCreationTimestamp="2025-11-28 15:42:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:19.582486377 +0000 UTC m=+1129.430092798" watchObservedRunningTime="2025-11-28 15:43:19.583151925 +0000 UTC m=+1129.430758346"
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.607180 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" podStartSLOduration=5.607155286 podStartE2EDuration="5.607155286s" podCreationTimestamp="2025-11-28 15:43:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:19.604526397 +0000 UTC m=+1129.452132818" watchObservedRunningTime="2025-11-28 15:43:19.607155286 +0000 UTC m=+1129.454761707"
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.615787 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.731273 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.731452 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.731489 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d97bs\" (UniqueName: \"kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.731824 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.731925 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.732013 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.732075 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle\") pod \"989faff9-98dc-4b76-b0bd-5c90aa13075b\" (UID: \"989faff9-98dc-4b76-b0bd-5c90aa13075b\") "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.732568 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.732689 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs" (OuterVolumeSpecName: "logs") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.733980 4647 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.734000 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/989faff9-98dc-4b76-b0bd-5c90aa13075b-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.743579 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.743612 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs" (OuterVolumeSpecName: "kube-api-access-d97bs") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "kube-api-access-d97bs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.745598 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts" (OuterVolumeSpecName: "scripts") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.787148 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.814517 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data" (OuterVolumeSpecName: "config-data") pod "989faff9-98dc-4b76-b0bd-5c90aa13075b" (UID: "989faff9-98dc-4b76-b0bd-5c90aa13075b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.838340 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.838381 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.838395 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d97bs\" (UniqueName: \"kubernetes.io/projected/989faff9-98dc-4b76-b0bd-5c90aa13075b-kube-api-access-d97bs\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.838455 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.838469 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989faff9-98dc-4b76-b0bd-5c90aa13075b-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.859800 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Nov 28 15:43:19 crc kubenswrapper[4647]: I1128 15:43:19.940507 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.648051 4647 generic.go:334] "Generic (PLEG): container finished" podID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerID="6b80d93bd89973e3884b1b26c9e02423767ed625bd64fff9a060eb7635e1f4bb" exitCode=0
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.648618 4647 generic.go:334] "Generic (PLEG): container finished" podID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerID="edb702d5d503c1d5483b4444103c192c94ea73442817a3315be15bc40d48dc42" exitCode=143
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.648662 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerDied","Data":"6b80d93bd89973e3884b1b26c9e02423767ed625bd64fff9a060eb7635e1f4bb"}
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.648690 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerDied","Data":"edb702d5d503c1d5483b4444103c192c94ea73442817a3315be15bc40d48dc42"}
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.671288 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76d7749889-z87rt" event={"ID":"8e65fe60-2d61-4066-aed7-6e211c8f2096","Type":"ContainerStarted","Data":"f1ada9cdfd8447d41be38b7115477d3a3a0fae080ed30a09304d017017e673ce"}
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.671340 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76d7749889-z87rt" event={"ID":"8e65fe60-2d61-4066-aed7-6e211c8f2096","Type":"ContainerStarted","Data":"41ac25b519f31e0ae16c930322b4d87b55201a979db115f59341bafcac0c5d57"}
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.672282 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-76d7749889-z87rt"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.707609 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.708074 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"989faff9-98dc-4b76-b0bd-5c90aa13075b","Type":"ContainerDied","Data":"b43cf5598e142f7076ca5e32bd54eb9b98264fea96e6a426b1ef5358319c3406"}
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.708109 4647 scope.go:117] "RemoveContainer" containerID="8b8e5f2401540e9cf4370b6d81c3b3e9e256f4b6d5462c1793ad9c3be189a929"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.790221 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.793845 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-76d7749889-z87rt" podStartSLOduration=3.793830013 podStartE2EDuration="3.793830013s" podCreationTimestamp="2025-11-28 15:43:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:20.719777765 +0000 UTC m=+1130.567384176" watchObservedRunningTime="2025-11-28 15:43:20.793830013 +0000 UTC m=+1130.641436434"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.806477 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.812148 4647 scope.go:117] "RemoveContainer" containerID="03587fff3683e7c31f7c20e82c5a9ed1e15569b894b2c33a94823dd19cea5ecc"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.815897 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.862381 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:43:20 crc kubenswrapper[4647]: E1128 15:43:20.864090 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.864192 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: E1128 15:43:20.864263 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.864315 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: E1128 15:43:20.864374 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.864448 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: E1128 15:43:20.864514 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.864569 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.865288 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.865364 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-log"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.865438 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.865509 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" containerName="glance-httpd"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.867837 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868134 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmfdl\" (UniqueName: \"kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868177 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868340 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868385 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868496 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.868525 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") "
Nov 28 15:43:20 crc kubenswrapper[4647]: I1128
15:43:20.868554 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run\") pod \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\" (UID: \"9ca92afd-9d04-4e91-be1e-69c1665a5d20\") " Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.871224 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.876389 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.882639 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.888131 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.889451 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs" (OuterVolumeSpecName: "logs") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.893304 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.893702 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl" (OuterVolumeSpecName: "kube-api-access-wmfdl") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "kube-api-access-wmfdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.896687 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts" (OuterVolumeSpecName: "scripts") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.962117 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data" (OuterVolumeSpecName: "config-data") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.971392 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.971761 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.971937 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972091 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972237 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972438 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972614 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972763 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztnj5\" (UniqueName: \"kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.972910 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:20 crc 
kubenswrapper[4647]: I1128 15:43:20.972974 4647 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ca92afd-9d04-4e91-be1e-69c1665a5d20-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.973032 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmfdl\" (UniqueName: \"kubernetes.io/projected/9ca92afd-9d04-4e91-be1e-69c1665a5d20-kube-api-access-wmfdl\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.973125 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.973198 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.973258 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.977992 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ca92afd-9d04-4e91-be1e-69c1665a5d20" (UID: "9ca92afd-9d04-4e91-be1e-69c1665a5d20"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:20 crc kubenswrapper[4647]: I1128 15:43:20.996422 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.074515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.075055 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.075202 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.075795 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.075929 
4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076013 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076098 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076176 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztnj5\" (UniqueName: \"kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076294 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca92afd-9d04-4e91-be1e-69c1665a5d20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076356 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076535 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.075745 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.076217 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.080458 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc 
kubenswrapper[4647]: I1128 15:43:21.083198 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.084290 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.084492 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.093449 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztnj5\" (UniqueName: \"kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.192917 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.344373 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.731374 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9ca92afd-9d04-4e91-be1e-69c1665a5d20","Type":"ContainerDied","Data":"e7a281228dd5455cd68f5efece14c769bf38a97c07ab4d9e78deed3c7ae2d62f"} Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.731846 4647 scope.go:117] "RemoveContainer" containerID="6b80d93bd89973e3884b1b26c9e02423767ed625bd64fff9a060eb7635e1f4bb" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.731674 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.772889 4647 scope.go:117] "RemoveContainer" containerID="edb702d5d503c1d5483b4444103c192c94ea73442817a3315be15bc40d48dc42" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.819174 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.833570 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.855488 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.857307 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.865750 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.865937 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.887920 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971449 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971522 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971586 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971613 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971641 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971713 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971751 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsc9k\" (UniqueName: \"kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:21 crc kubenswrapper[4647]: I1128 15:43:21.971780 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.072052 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.073513 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.073604 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.073659 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.073688 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.073706 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.074073 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.074123 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsc9k\" (UniqueName: \"kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.074155 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.074706 
4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.074879 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.076265 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.082309 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.084221 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.085742 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.098381 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsc9k\" (UniqueName: \"kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.108026 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.152945 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.186427 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.419209 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="989faff9-98dc-4b76-b0bd-5c90aa13075b" path="/var/lib/kubelet/pods/989faff9-98dc-4b76-b0bd-5c90aa13075b/volumes" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.420291 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ca92afd-9d04-4e91-be1e-69c1665a5d20" path="/var/lib/kubelet/pods/9ca92afd-9d04-4e91-be1e-69c1665a5d20/volumes" Nov 28 15:43:22 crc kubenswrapper[4647]: I1128 15:43:22.742983 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerStarted","Data":"ec4d014cde13e040ae693265e3bb2ebafc38ac1e4088f46d1ad18cc7f635a19f"} Nov 28 15:43:24 crc kubenswrapper[4647]: I1128 15:43:24.964584 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.045300 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.045936 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" containerID="cri-o://f29bee434829a4fff361ae7c67268f4684a89f6d65e41de7152c6dad108b5945" gracePeriod=10 Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.106598 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.106750 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.425566 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.425688 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:43:25 crc kubenswrapper[4647]: I1128 15:43:25.427166 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:43:26 crc kubenswrapper[4647]: I1128 15:43:26.753890 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.139:5353: connect: connection refused" Nov 28 15:43:26 crc kubenswrapper[4647]: I1128 15:43:26.802871 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-2vmmg" podUID="e0476ba4-d83b-4a10-9898-fe3b6b05f76e" containerName="nmstate-handler" probeResult="failure" output="command timed out" Nov 28 15:43:27 crc kubenswrapper[4647]: I1128 15:43:27.794596 4647 generic.go:334] "Generic (PLEG): container finished" podID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerID="f29bee434829a4fff361ae7c67268f4684a89f6d65e41de7152c6dad108b5945" 
exitCode=0 Nov 28 15:43:27 crc kubenswrapper[4647]: I1128 15:43:27.795101 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" event={"ID":"836b0825-d6e3-4fd9-9a1c-34672a0c543c","Type":"ContainerDied","Data":"f29bee434829a4fff361ae7c67268f4684a89f6d65e41de7152c6dad108b5945"} Nov 28 15:43:28 crc kubenswrapper[4647]: I1128 15:43:28.832557 4647 generic.go:334] "Generic (PLEG): container finished" podID="75b1595c-cca5-4809-94cf-c1c0ec937c27" containerID="0c7941f64ec343f785e561d3ff7461d27b9638e37faf8497e409296aacfa6710" exitCode=0 Nov 28 15:43:28 crc kubenswrapper[4647]: I1128 15:43:28.832935 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6k9fd" event={"ID":"75b1595c-cca5-4809-94cf-c1c0ec937c27","Type":"ContainerDied","Data":"0c7941f64ec343f785e561d3ff7461d27b9638e37faf8497e409296aacfa6710"} Nov 28 15:43:29 crc kubenswrapper[4647]: I1128 15:43:29.846318 4647 generic.go:334] "Generic (PLEG): container finished" podID="c34bccdb-127a-40c0-ac61-6e2f354a6c6d" containerID="8677705d8f0bedddf37e0a160a871511a7e394b111b5da45276d87698d685fcb" exitCode=0 Nov 28 15:43:29 crc kubenswrapper[4647]: I1128 15:43:29.846564 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n92kx" event={"ID":"c34bccdb-127a-40c0-ac61-6e2f354a6c6d","Type":"ContainerDied","Data":"8677705d8f0bedddf37e0a160a871511a7e394b111b5da45276d87698d685fcb"} Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.831277 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.838939 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.839036 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.839105 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.923916 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n92kx" event={"ID":"c34bccdb-127a-40c0-ac61-6e2f354a6c6d","Type":"ContainerDied","Data":"ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108"} Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.923984 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba0b768f477f3917ab94fe0d8005d585f8f90e3bcae8a05f16a6d9251f868108" Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.928118 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6k9fd" event={"ID":"75b1595c-cca5-4809-94cf-c1c0ec937c27","Type":"ContainerDied","Data":"4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202"} Nov 28 15:43:31 crc 
kubenswrapper[4647]: I1128 15:43:31.928151 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4068bddb1074836792dd59efecab72bbbaa955569af6ab666ad8889a72dc9202" Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.929462 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.939151 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" event={"ID":"836b0825-d6e3-4fd9-9a1c-34672a0c543c","Type":"ContainerDied","Data":"7c0ebc76d28b6f59e4206eca42d8fba1664a1d87f01ebca00eca808c69bc1701"} Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.939202 4647 scope.go:117] "RemoveContainer" containerID="f29bee434829a4fff361ae7c67268f4684a89f6d65e41de7152c6dad108b5945" Nov 28 15:43:31 crc kubenswrapper[4647]: I1128 15:43:31.939612 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.055717 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.055755 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pzbl\" (UniqueName: \"kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.055828 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.055873 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.055982 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.056085 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.056141 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qcl6\" (UniqueName: \"kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:32 crc 
kubenswrapper[4647]: I1128 15:43:32.056262 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle\") pod \"75b1595c-cca5-4809-94cf-c1c0ec937c27\" (UID: \"75b1595c-cca5-4809-94cf-c1c0ec937c27\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.056367 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb\") pod \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\" (UID: \"836b0825-d6e3-4fd9-9a1c-34672a0c543c\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.083656 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.120027 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.132323 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6" (OuterVolumeSpecName: "kube-api-access-5qcl6") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "kube-api-access-5qcl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.134622 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl" (OuterVolumeSpecName: "kube-api-access-8pzbl") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "kube-api-access-8pzbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.135405 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts" (OuterVolumeSpecName: "scripts") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.135470 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.138001 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-n92kx" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.141586 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.160774 4647 scope.go:117] "RemoveContainer" containerID="466e3045459719c468f1ee6316f7cc507d42efec36a2e7ca4a2a9cb8fe70cd4e" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.165730 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data\") pod \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.165836 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle\") pod \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.165895 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt8b9\" (UniqueName: \"kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9\") pod \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\" (UID: \"c34bccdb-127a-40c0-ac61-6e2f354a6c6d\") " Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166133 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166150 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qcl6\" (UniqueName: \"kubernetes.io/projected/836b0825-d6e3-4fd9-9a1c-34672a0c543c-kube-api-access-5qcl6\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166179 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166190 4647 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166198 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pzbl\" (UniqueName: \"kubernetes.io/projected/75b1595c-cca5-4809-94cf-c1c0ec937c27-kube-api-access-8pzbl\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166208 4647 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.166219 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.185099 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data" (OuterVolumeSpecName: "config-data") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.208596 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9" (OuterVolumeSpecName: "kube-api-access-wt8b9") pod "c34bccdb-127a-40c0-ac61-6e2f354a6c6d" (UID: "c34bccdb-127a-40c0-ac61-6e2f354a6c6d"). InnerVolumeSpecName "kube-api-access-wt8b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.208887 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c34bccdb-127a-40c0-ac61-6e2f354a6c6d" (UID: "c34bccdb-127a-40c0-ac61-6e2f354a6c6d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.244169 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75b1595c-cca5-4809-94cf-c1c0ec937c27" (UID: "75b1595c-cca5-4809-94cf-c1c0ec937c27"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.268455 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.268487 4647 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.268498 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75b1595c-cca5-4809-94cf-c1c0ec937c27-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.268508 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt8b9\" (UniqueName: \"kubernetes.io/projected/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-kube-api-access-wt8b9\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.279842 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.283576 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config" (OuterVolumeSpecName: "config") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.321467 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "836b0825-d6e3-4fd9-9a1c-34672a0c543c" (UID: "836b0825-d6e3-4fd9-9a1c-34672a0c543c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.331021 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c34bccdb-127a-40c0-ac61-6e2f354a6c6d" (UID: "c34bccdb-127a-40c0-ac61-6e2f354a6c6d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.372492 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.372542 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.372556 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836b0825-d6e3-4fd9-9a1c-34672a0c543c-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.372567 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c34bccdb-127a-40c0-ac61-6e2f354a6c6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.568961 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.583032 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-gmvlv"] Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.632370 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.952814 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2","Type":"ContainerStarted","Data":"7fe0c4ed14c9d4a69082f3a7b955a5c7329e2d444b6f1d6e93ff95a62234ba54"} Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.956000 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerStarted","Data":"6d9c7eceac7cd47bc3542c38aa735b9f5ae0509f9e839cd0e6c07f4223957e72"} Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 
15:43:32.957669 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerStarted","Data":"bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c"} Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.960710 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-n92kx" Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.961449 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4n48c" event={"ID":"fb420869-c42b-47a6-8e22-c4e263d9a666","Type":"ContainerStarted","Data":"e703c83e2676b9ada434ba75eb4631cfd27f062cb2124853751aee90a84e4396"} Nov 28 15:43:32 crc kubenswrapper[4647]: I1128 15:43:32.962309 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6k9fd" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.002956 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-4n48c" podStartSLOduration=3.501299349 podStartE2EDuration="1m8.002937896s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="2025-11-28 15:42:27.451217918 +0000 UTC m=+1077.298824329" lastFinishedPulling="2025-11-28 15:43:31.952856455 +0000 UTC m=+1141.800462876" observedRunningTime="2025-11-28 15:43:32.98750109 +0000 UTC m=+1142.835107511" watchObservedRunningTime="2025-11-28 15:43:33.002937896 +0000 UTC m=+1142.850544317" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.076351 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8567d4b5-5ss7w"] Nov 28 15:43:33 crc kubenswrapper[4647]: E1128 15:43:33.076846 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75b1595c-cca5-4809-94cf-c1c0ec937c27" containerName="keystone-bootstrap" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.076869 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="75b1595c-cca5-4809-94cf-c1c0ec937c27" containerName="keystone-bootstrap" Nov 28 15:43:33 crc kubenswrapper[4647]: E1128 15:43:33.076885 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.076894 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" Nov 28 15:43:33 crc kubenswrapper[4647]: E1128 15:43:33.076925 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34bccdb-127a-40c0-ac61-6e2f354a6c6d" containerName="barbican-db-sync" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.076932 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34bccdb-127a-40c0-ac61-6e2f354a6c6d" containerName="barbican-db-sync" Nov 28 15:43:33 crc kubenswrapper[4647]: E1128 15:43:33.076950 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="init" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.076956 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="init" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.077138 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c34bccdb-127a-40c0-ac61-6e2f354a6c6d" containerName="barbican-db-sync" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 
15:43:33.077158 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="75b1595c-cca5-4809-94cf-c1c0ec937c27" containerName="keystone-bootstrap" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.077177 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.077924 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.081077 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.081534 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.081662 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.082239 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.082670 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.082894 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-7j874" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.098571 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8567d4b5-5ss7w"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.195394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-internal-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-scripts\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200570 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-config-data\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200602 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-fernet-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200638 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-combined-ca-bundle\") pod 
\"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200843 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csjcf\" (UniqueName: \"kubernetes.io/projected/317b992c-8c2d-4838-bfbf-6debefd73d0a-kube-api-access-csjcf\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.200946 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-public-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.201144 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-credential-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303632 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-credential-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303717 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-internal-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303781 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-scripts\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303848 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-config-data\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303872 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-fernet-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303902 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-combined-ca-bundle\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " 
pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303938 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csjcf\" (UniqueName: \"kubernetes.io/projected/317b992c-8c2d-4838-bfbf-6debefd73d0a-kube-api-access-csjcf\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.303975 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-public-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.330904 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-combined-ca-bundle\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.332849 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-config-data\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.333281 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-public-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.336055 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-internal-tls-certs\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.347635 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-credential-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.350949 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-scripts\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.355207 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/317b992c-8c2d-4838-bfbf-6debefd73d0a-fernet-keys\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.361432 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-9c6c77dd7-h6hcl"] Nov 
28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.375111 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.382044 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-p9vz4" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.382285 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.382471 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.417164 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-68d4467b78-mhh9d"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.418811 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.425564 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-9c6c77dd7-h6hcl"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.428477 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.453569 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68d4467b78-mhh9d"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.460837 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csjcf\" (UniqueName: \"kubernetes.io/projected/317b992c-8c2d-4838-bfbf-6debefd73d0a-kube-api-access-csjcf\") pod \"keystone-8567d4b5-5ss7w\" (UID: \"317b992c-8c2d-4838-bfbf-6debefd73d0a\") " pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.525505 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-combined-ca-bundle\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.525553 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52ae86fe-ca94-41f0-880a-d957edd96160-logs\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.525579 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.525617 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgjb5\" (UniqueName: \"kubernetes.io/projected/a70c88e0-3df8-484f-8343-2bf87f6c9f33-kube-api-access-fgjb5\") pod 
\"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.525653 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a70c88e0-3df8-484f-8343-2bf87f6c9f33-logs\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.527054 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-combined-ca-bundle\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.527156 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.527181 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57m8p\" (UniqueName: \"kubernetes.io/projected/52ae86fe-ca94-41f0-880a-d957edd96160-kube-api-access-57m8p\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.527203 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data-custom\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.527231 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data-custom\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.557399 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.561255 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.630977 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a70c88e0-3df8-484f-8343-2bf87f6c9f33-logs\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631053 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-combined-ca-bundle\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631111 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631132 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57m8p\" (UniqueName: \"kubernetes.io/projected/52ae86fe-ca94-41f0-880a-d957edd96160-kube-api-access-57m8p\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631151 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data-custom\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631171 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data-custom\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631210 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-combined-ca-bundle\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631228 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52ae86fe-ca94-41f0-880a-d957edd96160-logs\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631247 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data\") pod 
\"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.631278 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgjb5\" (UniqueName: \"kubernetes.io/projected/a70c88e0-3df8-484f-8343-2bf87f6c9f33-kube-api-access-fgjb5\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.632837 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a70c88e0-3df8-484f-8343-2bf87f6c9f33-logs\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.637803 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data-custom\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.638323 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-combined-ca-bundle\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.638621 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52ae86fe-ca94-41f0-880a-d957edd96160-logs\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.641683 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-combined-ca-bundle\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.642842 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.646373 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70c88e0-3df8-484f-8343-2bf87f6c9f33-config-data\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.649128 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/52ae86fe-ca94-41f0-880a-d957edd96160-config-data-custom\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.678245 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.715532 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738401 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738474 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738507 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn5lv\" (UniqueName: \"kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738676 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738707 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.738736 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.775619 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57m8p\" (UniqueName: \"kubernetes.io/projected/52ae86fe-ca94-41f0-880a-d957edd96160-kube-api-access-57m8p\") pod \"barbican-worker-9c6c77dd7-h6hcl\" (UID: \"52ae86fe-ca94-41f0-880a-d957edd96160\") " pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.798382 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fgjb5\" (UniqueName: \"kubernetes.io/projected/a70c88e0-3df8-484f-8343-2bf87f6c9f33-kube-api-access-fgjb5\") pod \"barbican-keystone-listener-68d4467b78-mhh9d\" (UID: \"a70c88e0-3df8-484f-8343-2bf87f6c9f33\") " pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.827048 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.829164 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.844070 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.853608 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.853695 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.853760 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.853880 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.853946 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.854003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn5lv\" (UniqueName: \"kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.855148 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 
15:43:33.855332 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.855921 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.858359 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.861897 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.873255 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.902191 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn5lv\" (UniqueName: \"kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv\") pod \"dnsmasq-dns-688c87cc99-wbs2b\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.934560 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.959053 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.959814 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vg67r\" (UniqueName: \"kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.959865 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.959891 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.959998 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:33 crc kubenswrapper[4647]: I1128 15:43:33.999580 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerStarted","Data":"dfe5f51562f95457637fa84764fa1b2996ec3b1fca9e5ebbb693c1255ea1f996"} Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.015460 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p265g" event={"ID":"4f54b294-79b2-4097-9011-f094f66cc705","Type":"ContainerStarted","Data":"2eab7b2bfb275405cd699a6b63cd2f3543dfddc8df6e0b930ae4d2422889ac00"} Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.042373 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-p265g" podStartSLOduration=3.753696466 podStartE2EDuration="1m8.042354228s" podCreationTimestamp="2025-11-28 15:42:26 +0000 UTC" firstStartedPulling="2025-11-28 15:42:27.645651905 +0000 UTC m=+1077.493258326" lastFinishedPulling="2025-11-28 15:43:31.934309667 +0000 UTC m=+1141.781916088" observedRunningTime="2025-11-28 15:43:34.041428603 +0000 UTC m=+1143.889035024" watchObservedRunningTime="2025-11-28 15:43:34.042354228 +0000 UTC m=+1143.889960649" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.053499 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.062979 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.063036 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vg67r\" (UniqueName: \"kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.063058 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.063086 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.063121 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.063522 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.065659 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.071125 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.078551 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.080731 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vg67r\" (UniqueName: \"kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.092554 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle\") pod \"barbican-api-9d6dbf854-hbzkr\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.245860 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.460469 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" path="/var/lib/kubelet/pods/836b0825-d6e3-4fd9-9a1c-34672a0c543c/volumes" Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.609769 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8567d4b5-5ss7w"] Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.778915 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"] Nov 28 15:43:34 crc kubenswrapper[4647]: I1128 15:43:34.985067 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68d4467b78-mhh9d"] Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.021062 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-9c6c77dd7-h6hcl"] Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.057162 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" event={"ID":"3568d112-eb07-4349-83bc-d42c2e5ec135","Type":"ContainerStarted","Data":"3835cacb51f83b075dbde9ef395b8ecbb6ee63292443dc22ef50ee935f5f4b2e"} Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.058509 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8567d4b5-5ss7w" event={"ID":"317b992c-8c2d-4838-bfbf-6debefd73d0a","Type":"ContainerStarted","Data":"e9aabaae453f091ead94606be1621f4e054c6f0da4001a2c2bd896a0f935b08d"} Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.061546 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerStarted","Data":"2d73c415a9268be2172ddccb53651d0ba7ee43385a1415a1a5f80af0b497103e"} Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.065454 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" event={"ID":"a70c88e0-3df8-484f-8343-2bf87f6c9f33","Type":"ContainerStarted","Data":"517370c2ac06227ff145e4a75b8e8769c2bcd66fc0488edbb2cc8af9f4fc4fea"} Nov 28 15:43:35 crc kubenswrapper[4647]: W1128 15:43:35.080744 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52ae86fe_ca94_41f0_880a_d957edd96160.slice/crio-7454776efb1f177fb4f79d1b30f04270720228da2288a8e59f30701d08ffe4a5 WatchSource:0}: Error finding container 7454776efb1f177fb4f79d1b30f04270720228da2288a8e59f30701d08ffe4a5: Status 404 returned error can't find the container with id 7454776efb1f177fb4f79d1b30f04270720228da2288a8e59f30701d08ffe4a5 Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.118316 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.120226 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=15.120214401 podStartE2EDuration="15.120214401s" podCreationTimestamp="2025-11-28 15:43:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:35.095106351 +0000 UTC m=+1144.942712772" watchObservedRunningTime="2025-11-28 15:43:35.120214401 +0000 UTC m=+1144.967820812" Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.181563 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:35 crc kubenswrapper[4647]: I1128 15:43:35.429441 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.103038 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" event={"ID":"52ae86fe-ca94-41f0-880a-d957edd96160","Type":"ContainerStarted","Data":"7454776efb1f177fb4f79d1b30f04270720228da2288a8e59f30701d08ffe4a5"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.108247 4647 generic.go:334] "Generic (PLEG): container finished" podID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerID="9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf" exitCode=0 Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.108299 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" event={"ID":"3568d112-eb07-4349-83bc-d42c2e5ec135","Type":"ContainerDied","Data":"9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.118768 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerStarted","Data":"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.118817 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerStarted","Data":"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.118829 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerStarted","Data":"90d04a661d02ac7f9b2d9e760910f415a6317becdca9e0cac9d82599ec550aa6"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.168873 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerStarted","Data":"fb47b2636e4ccbf78e445df7f2965c002d36fc7be4b00ba47ce4f5787482f287"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.198955 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8567d4b5-5ss7w" event={"ID":"317b992c-8c2d-4838-bfbf-6debefd73d0a","Type":"ContainerStarted","Data":"df4a89395ef0ed11357f4ab0dc9201d3aae3ccec6330c795a971b8e35f2628df"} Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.199041 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-8567d4b5-5ss7w" Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.382032 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=15.382008024 podStartE2EDuration="15.382008024s" podCreationTimestamp="2025-11-28 15:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:36.264936713 +0000 UTC m=+1146.112543134" watchObservedRunningTime="2025-11-28 15:43:36.382008024 +0000 UTC m=+1146.229614445" Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.403540 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8567d4b5-5ss7w" podStartSLOduration=3.40351425 podStartE2EDuration="3.40351425s" podCreationTimestamp="2025-11-28 15:43:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:36.322002255 +0000 UTC m=+1146.169608676" watchObservedRunningTime="2025-11-28 15:43:36.40351425 +0000 UTC m=+1146.251120671" Nov 28 15:43:36 crc kubenswrapper[4647]: I1128 15:43:36.754772 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fcfdd6f9f-gmvlv" podUID="836b0825-d6e3-4fd9-9a1c-34672a0c543c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.139:5353: i/o timeout" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.218316 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.218923 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.243874 4647 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/barbican-api-9d6dbf854-hbzkr" podStartSLOduration=4.243850713 podStartE2EDuration="4.243850713s" podCreationTimestamp="2025-11-28 15:43:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:37.242044985 +0000 UTC m=+1147.089651406" watchObservedRunningTime="2025-11-28 15:43:37.243850713 +0000 UTC m=+1147.091457134" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.825522 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-8f8ccd5d4-thgh2"] Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.827074 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.830983 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.831057 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 15:43:37 crc kubenswrapper[4647]: I1128 15:43:37.863616 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-8f8ccd5d4-thgh2"] Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.021446 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-internal-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.021520 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9mm6\" (UniqueName: \"kubernetes.io/projected/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-kube-api-access-n9mm6\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.022375 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-public-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.022461 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-logs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.022566 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-combined-ca-bundle\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.022620 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.022641 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data-custom\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125137 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-public-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125191 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-logs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125242 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-combined-ca-bundle\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125267 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125287 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data-custom\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125368 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-internal-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125395 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9mm6\" (UniqueName: \"kubernetes.io/projected/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-kube-api-access-n9mm6\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.125716 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-logs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.145157 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data-custom\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.149477 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-config-data\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.149706 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-public-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.152952 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-combined-ca-bundle\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.159320 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9mm6\" (UniqueName: \"kubernetes.io/projected/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-kube-api-access-n9mm6\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.175567 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cad9ee6e-4bee-49c6-9f24-7c97e6e745ed-internal-tls-certs\") pod \"barbican-api-8f8ccd5d4-thgh2\" (UID: \"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed\") " pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.248749 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" event={"ID":"3568d112-eb07-4349-83bc-d42c2e5ec135","Type":"ContainerStarted","Data":"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"} Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.248890 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.278721 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" podStartSLOduration=5.278703094 podStartE2EDuration="5.278703094s" podCreationTimestamp="2025-11-28 15:43:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:38.272552403 +0000 UTC m=+1148.120158824" watchObservedRunningTime="2025-11-28 15:43:38.278703094 +0000 UTC 
m=+1148.126309515" Nov 28 15:43:38 crc kubenswrapper[4647]: I1128 15:43:38.452358 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:39 crc kubenswrapper[4647]: I1128 15:43:39.262975 4647 generic.go:334] "Generic (PLEG): container finished" podID="fb420869-c42b-47a6-8e22-c4e263d9a666" containerID="e703c83e2676b9ada434ba75eb4631cfd27f062cb2124853751aee90a84e4396" exitCode=0 Nov 28 15:43:39 crc kubenswrapper[4647]: I1128 15:43:39.263153 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4n48c" event={"ID":"fb420869-c42b-47a6-8e22-c4e263d9a666","Type":"ContainerDied","Data":"e703c83e2676b9ada434ba75eb4631cfd27f062cb2124853751aee90a84e4396"} Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.840786 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4n48c" Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.965332 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data\") pod \"fb420869-c42b-47a6-8e22-c4e263d9a666\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.965623 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs\") pod \"fb420869-c42b-47a6-8e22-c4e263d9a666\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.965682 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts\") pod \"fb420869-c42b-47a6-8e22-c4e263d9a666\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.965773 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle\") pod \"fb420869-c42b-47a6-8e22-c4e263d9a666\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.965902 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c8bp\" (UniqueName: \"kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp\") pod \"fb420869-c42b-47a6-8e22-c4e263d9a666\" (UID: \"fb420869-c42b-47a6-8e22-c4e263d9a666\") " Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.967071 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs" (OuterVolumeSpecName: "logs") pod "fb420869-c42b-47a6-8e22-c4e263d9a666" (UID: "fb420869-c42b-47a6-8e22-c4e263d9a666"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.974796 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts" (OuterVolumeSpecName: "scripts") pod "fb420869-c42b-47a6-8e22-c4e263d9a666" (UID: "fb420869-c42b-47a6-8e22-c4e263d9a666"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.980880 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp" (OuterVolumeSpecName: "kube-api-access-6c8bp") pod "fb420869-c42b-47a6-8e22-c4e263d9a666" (UID: "fb420869-c42b-47a6-8e22-c4e263d9a666"). InnerVolumeSpecName "kube-api-access-6c8bp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:40 crc kubenswrapper[4647]: I1128 15:43:40.996668 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-8f8ccd5d4-thgh2"] Nov 28 15:43:41 crc kubenswrapper[4647]: W1128 15:43:40.999655 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcad9ee6e_4bee_49c6_9f24_7c97e6e745ed.slice/crio-a0bd252297a8aedc2d4b6b5b956e846de9d0f7efdc6897068f34f54198544cca WatchSource:0}: Error finding container a0bd252297a8aedc2d4b6b5b956e846de9d0f7efdc6897068f34f54198544cca: Status 404 returned error can't find the container with id a0bd252297a8aedc2d4b6b5b956e846de9d0f7efdc6897068f34f54198544cca Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.018850 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb420869-c42b-47a6-8e22-c4e263d9a666" (UID: "fb420869-c42b-47a6-8e22-c4e263d9a666"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.023372 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data" (OuterVolumeSpecName: "config-data") pod "fb420869-c42b-47a6-8e22-c4e263d9a666" (UID: "fb420869-c42b-47a6-8e22-c4e263d9a666"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.067970 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb420869-c42b-47a6-8e22-c4e263d9a666-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.068003 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.068049 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.068061 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c8bp\" (UniqueName: \"kubernetes.io/projected/fb420869-c42b-47a6-8e22-c4e263d9a666-kube-api-access-6c8bp\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.068071 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb420869-c42b-47a6-8e22-c4e263d9a666-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.339754 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8f8ccd5d4-thgh2" event={"ID":"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed","Type":"ContainerStarted","Data":"a986daad732b6ee80cc170a0e10124bcd05f9876d028cdc94025753347e8418b"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.339826 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8f8ccd5d4-thgh2" event={"ID":"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed","Type":"ContainerStarted","Data":"a0bd252297a8aedc2d4b6b5b956e846de9d0f7efdc6897068f34f54198544cca"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.346157 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4n48c" event={"ID":"fb420869-c42b-47a6-8e22-c4e263d9a666","Type":"ContainerDied","Data":"fcdbc55e5a093e0dc3917732bef1c4faaf0363e90f224be0facdb493319e0d65"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.346211 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcdbc55e5a093e0dc3917732bef1c4faaf0363e90f224be0facdb493319e0d65" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.346305 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4n48c" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.351842 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.351901 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.371857 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" event={"ID":"a70c88e0-3df8-484f-8343-2bf87f6c9f33","Type":"ContainerStarted","Data":"77cca6ff9ca7a4ac6d29526d3229b389632700eb9f143eb673a96a28fbe27543"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.371908 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" event={"ID":"a70c88e0-3df8-484f-8343-2bf87f6c9f33","Type":"ContainerStarted","Data":"f7345f11891996e7cf64154196ab9c5cbec36a004fc5903312eda803ff1a07fa"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.382184 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" event={"ID":"52ae86fe-ca94-41f0-880a-d957edd96160","Type":"ContainerStarted","Data":"22879045ab0e70a7ff87fad1a0cb55bef48a731fcd8b00e6f6a6b2a8c173ba68"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.382243 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" event={"ID":"52ae86fe-ca94-41f0-880a-d957edd96160","Type":"ContainerStarted","Data":"7cf520b42bba595050d10ed855865cb70ba65bd55aaf0cd124117aad4a6fbaa7"} Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.473518 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-68d4467b78-mhh9d" podStartSLOduration=3.21157415 podStartE2EDuration="8.473490443s" podCreationTimestamp="2025-11-28 15:43:33 +0000 UTC" firstStartedPulling="2025-11-28 15:43:34.994790991 +0000 UTC m=+1144.842397412" lastFinishedPulling="2025-11-28 15:43:40.256707284 +0000 UTC m=+1150.104313705" observedRunningTime="2025-11-28 15:43:41.415346713 +0000 UTC m=+1151.262953134" watchObservedRunningTime="2025-11-28 15:43:41.473490443 +0000 UTC m=+1151.321096864" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.478185 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.497031 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-9c6c77dd7-h6hcl" podStartSLOduration=3.311959791 podStartE2EDuration="8.496604101s" podCreationTimestamp="2025-11-28 15:43:33 +0000 UTC" firstStartedPulling="2025-11-28 15:43:35.085814496 +0000 UTC m=+1144.933420917" lastFinishedPulling="2025-11-28 15:43:40.270458806 +0000 UTC m=+1150.118065227" observedRunningTime="2025-11-28 15:43:41.463176102 +0000 UTC m=+1151.310782523" watchObservedRunningTime="2025-11-28 15:43:41.496604101 +0000 UTC m=+1151.344210522" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.539518 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6565667588-kf4hg"] Nov 28 15:43:41 crc kubenswrapper[4647]: E1128 15:43:41.540144 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" containerName="placement-db-sync" Nov 
28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.540158 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" containerName="placement-db-sync" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.540376 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" containerName="placement-db-sync" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.541629 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.548681 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.549212 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.549342 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.549494 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mk2rq" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.549600 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.603741 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6565667588-kf4hg"] Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.604960 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688459 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-internal-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688541 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-logs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688577 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-public-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688605 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brwb6\" (UniqueName: \"kubernetes.io/projected/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-kube-api-access-brwb6\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688752 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-combined-ca-bundle\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688779 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-scripts\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.688803 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-config-data\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.793764 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-internal-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.793831 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-logs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.793858 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-public-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.793881 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brwb6\" (UniqueName: \"kubernetes.io/projected/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-kube-api-access-brwb6\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.793980 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-combined-ca-bundle\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.794003 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-scripts\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.794022 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-config-data\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.795442 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-logs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.804955 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-combined-ca-bundle\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.810396 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brwb6\" (UniqueName: \"kubernetes.io/projected/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-kube-api-access-brwb6\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.811159 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-internal-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.811452 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-scripts\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.813436 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-config-data\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.830895 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407-public-tls-certs\") pod \"placement-6565667588-kf4hg\" (UID: \"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407\") " pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:41 crc kubenswrapper[4647]: I1128 15:43:41.871302 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.190679 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.190922 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.304353 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.323345 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.455115 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8f8ccd5d4-thgh2" event={"ID":"cad9ee6e-4bee-49c6-9f24-7c97e6e745ed","Type":"ContainerStarted","Data":"6d8d6601684f37a70a17160e5443018f9d09ddd8f20567479032ebbb81956a8e"} Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.455158 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.456612 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.456677 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.456692 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.456706 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.456717 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.601940 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-8f8ccd5d4-thgh2" podStartSLOduration=5.601917857 podStartE2EDuration="5.601917857s" podCreationTimestamp="2025-11-28 15:43:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:42.483913352 +0000 UTC m=+1152.331519773" watchObservedRunningTime="2025-11-28 15:43:42.601917857 +0000 UTC m=+1152.449524278" Nov 28 15:43:42 crc kubenswrapper[4647]: I1128 15:43:42.603175 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6565667588-kf4hg"] Nov 28 15:43:43 crc kubenswrapper[4647]: I1128 15:43:43.937391 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:43:43 crc kubenswrapper[4647]: I1128 15:43:43.996315 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"] Nov 28 15:43:43 crc kubenswrapper[4647]: I1128 15:43:43.997685 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" 
containerID="cri-o://cd4cee753584194ed7fc31e2f6710ccbeede19870d6b88550eead7cbe9337dda" gracePeriod=10 Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.509102 4647 generic.go:334] "Generic (PLEG): container finished" podID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerID="cd4cee753584194ed7fc31e2f6710ccbeede19870d6b88550eead7cbe9337dda" exitCode=0 Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.509176 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" event={"ID":"d7b93065-ae7a-4916-ac48-a672dd1048cb","Type":"ContainerDied","Data":"cd4cee753584194ed7fc31e2f6710ccbeede19870d6b88550eead7cbe9337dda"} Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.530344 4647 generic.go:334] "Generic (PLEG): container finished" podID="4f54b294-79b2-4097-9011-f094f66cc705" containerID="2eab7b2bfb275405cd699a6b63cd2f3543dfddc8df6e0b930ae4d2422889ac00" exitCode=0 Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.531402 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p265g" event={"ID":"4f54b294-79b2-4097-9011-f094f66cc705","Type":"ContainerDied","Data":"2eab7b2bfb275405cd699a6b63cd2f3543dfddc8df6e0b930ae4d2422889ac00"} Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.531817 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.531834 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.532381 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.532400 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.928495 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-678f98b656-ftpzl" Nov 28 15:43:44 crc kubenswrapper[4647]: I1128 15:43:44.964188 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 28 15:43:45 crc kubenswrapper[4647]: I1128 15:43:45.108985 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 28 15:43:45 crc kubenswrapper[4647]: I1128 15:43:45.426613 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:43:45 crc kubenswrapper[4647]: I1128 15:43:45.427098 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:43:45 crc kubenswrapper[4647]: I1128 15:43:45.428376 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66"} 
pod="openstack/horizon-66c6c46cdb-xgv7h" containerMessage="Container horizon failed startup probe, will be restarted" Nov 28 15:43:45 crc kubenswrapper[4647]: I1128 15:43:45.428559 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" containerID="cri-o://420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66" gracePeriod=30 Nov 28 15:43:47 crc kubenswrapper[4647]: I1128 15:43:47.426956 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:47 crc kubenswrapper[4647]: I1128 15:43:47.572291 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:48 crc kubenswrapper[4647]: I1128 15:43:48.036514 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-76d7749889-z87rt" Nov 28 15:43:48 crc kubenswrapper[4647]: I1128 15:43:48.111080 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-678f98b656-ftpzl"] Nov 28 15:43:48 crc kubenswrapper[4647]: I1128 15:43:48.111312 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-678f98b656-ftpzl" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-api" containerID="cri-o://7563fe0020ecc9b1f60fa298d03bcd154ddcc5215012322fde02968b070b7939" gracePeriod=30 Nov 28 15:43:48 crc kubenswrapper[4647]: I1128 15:43:48.111862 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-678f98b656-ftpzl" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-httpd" containerID="cri-o://533eafe7adce88e49f74eb427938363d9b928e13c2700cbed87f0eab78c3a711" gracePeriod=30 Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.011569 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.011765 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.109853 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.110038 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.110600 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.335950 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.632636 4647 generic.go:334] "Generic (PLEG): container finished" podID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerID="533eafe7adce88e49f74eb427938363d9b928e13c2700cbed87f0eab78c3a711" exitCode=0 Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.632985 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerDied","Data":"533eafe7adce88e49f74eb427938363d9b928e13c2700cbed87f0eab78c3a711"} Nov 28 15:43:49 crc kubenswrapper[4647]: I1128 15:43:49.964632 4647 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 28 15:43:51 crc kubenswrapper[4647]: I1128 15:43:51.025814 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:51 crc kubenswrapper[4647]: I1128 15:43:51.146696 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-8f8ccd5d4-thgh2" Nov 28 15:43:51 crc kubenswrapper[4647]: I1128 15:43:51.250570 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:51 crc kubenswrapper[4647]: I1128 15:43:51.250847 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-9d6dbf854-hbzkr" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api-log" containerID="cri-o://e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c" gracePeriod=30 Nov 28 15:43:51 crc kubenswrapper[4647]: I1128 15:43:51.251364 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-9d6dbf854-hbzkr" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api" containerID="cri-o://983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500" gracePeriod=30 Nov 28 15:43:52 crc kubenswrapper[4647]: I1128 15:43:52.666975 4647 generic.go:334] "Generic (PLEG): container finished" podID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerID="e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c" exitCode=143 Nov 28 15:43:52 crc kubenswrapper[4647]: I1128 15:43:52.667507 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerDied","Data":"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c"} Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.314464 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-p265g" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439137 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439313 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439403 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439452 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4mbr\" (UniqueName: \"kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439520 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.439550 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data\") pod \"4f54b294-79b2-4097-9011-f094f66cc705\" (UID: \"4f54b294-79b2-4097-9011-f094f66cc705\") " Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.441546 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.449430 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts" (OuterVolumeSpecName: "scripts") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.450627 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.465627 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr" (OuterVolumeSpecName: "kube-api-access-h4mbr") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "kube-api-access-h4mbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.542683 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.545887 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4mbr\" (UniqueName: \"kubernetes.io/projected/4f54b294-79b2-4097-9011-f094f66cc705-kube-api-access-h4mbr\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.545901 4647 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.545913 4647 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f54b294-79b2-4097-9011-f094f66cc705-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.561286 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.592926 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data" (OuterVolumeSpecName: "config-data") pod "4f54b294-79b2-4097-9011-f094f66cc705" (UID: "4f54b294-79b2-4097-9011-f094f66cc705"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.648903 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.648935 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f54b294-79b2-4097-9011-f094f66cc705-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.686590 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6565667588-kf4hg" event={"ID":"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407","Type":"ContainerStarted","Data":"295cf458f95308078acad19b070092dd3291a0af17638bb79b01031504535e9b"} Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.688895 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p265g" event={"ID":"4f54b294-79b2-4097-9011-f094f66cc705","Type":"ContainerDied","Data":"c05262de4a7a039b884495f50bf3d452de408287d52ff65ae9959f176f2d38b6"} Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.688927 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c05262de4a7a039b884495f50bf3d452de408287d52ff65ae9959f176f2d38b6" Nov 28 15:43:53 crc kubenswrapper[4647]: I1128 15:43:53.688979 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-p265g" Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.061253 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.061461 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zwg9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.062799 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"ceilometer-notification-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.091716 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.156728 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lwwt\" (UniqueName: \"kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.156894 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.156931 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.157013 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.157213 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.157266 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc\") pod \"d7b93065-ae7a-4916-ac48-a672dd1048cb\" (UID: \"d7b93065-ae7a-4916-ac48-a672dd1048cb\") " Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.167299 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt" (OuterVolumeSpecName: "kube-api-access-6lwwt") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "kube-api-access-6lwwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.265369 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lwwt\" (UniqueName: \"kubernetes.io/projected/d7b93065-ae7a-4916-ac48-a672dd1048cb-kube-api-access-6lwwt\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.273104 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config" (OuterVolumeSpecName: "config") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.311724 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.320786 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.329342 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.332298 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d7b93065-ae7a-4916-ac48-a672dd1048cb" (UID: "d7b93065-ae7a-4916-ac48-a672dd1048cb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.367966 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.368053 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.368068 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.368080 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.368093 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7b93065-ae7a-4916-ac48-a672dd1048cb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.632683 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9d6dbf854-hbzkr" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:60450->10.217.0.158:9311: read: connection reset by peer" Nov 28 15:43:54 
crc kubenswrapper[4647]: I1128 15:43:54.633023 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9d6dbf854-hbzkr" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:60442->10.217.0.158:9311: read: connection reset by peer" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.734369 4647 generic.go:334] "Generic (PLEG): container finished" podID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerID="7563fe0020ecc9b1f60fa298d03bcd154ddcc5215012322fde02968b070b7939" exitCode=0 Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.734458 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerDied","Data":"7563fe0020ecc9b1f60fa298d03bcd154ddcc5215012322fde02968b070b7939"} Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.739028 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" containerName="sg-core" containerID="cri-o://7fe0c4ed14c9d4a69082f3a7b955a5c7329e2d444b6f1d6e93ff95a62234ba54" gracePeriod=30 Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.739472 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.740257 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-2xxj2" event={"ID":"d7b93065-ae7a-4916-ac48-a672dd1048cb","Type":"ContainerDied","Data":"80fb394bae4eb8a00bfc9c13c4630a41ac0273c9ce2baef353367912decc3576"} Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.740291 4647 scope.go:117] "RemoveContainer" containerID="cd4cee753584194ed7fc31e2f6710ccbeede19870d6b88550eead7cbe9337dda" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.741907 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.753582 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="init" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.753686 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="init" Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.753765 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.753817 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" Nov 28 15:43:54 crc kubenswrapper[4647]: E1128 15:43:54.753879 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f54b294-79b2-4097-9011-f094f66cc705" containerName="cinder-db-sync" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.753928 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f54b294-79b2-4097-9011-f094f66cc705" containerName="cinder-db-sync" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.754192 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f54b294-79b2-4097-9011-f094f66cc705" containerName="cinder-db-sync" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.754268 4647 
memory_manager.go:354] "RemoveStaleState removing state" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" containerName="dnsmasq-dns" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.755307 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.761810 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.763672 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.763957 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.764086 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-5ddwp" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.786071 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.793992 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.794033 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln9dx\" (UniqueName: \"kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.794095 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.794116 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.794163 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.794248 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.810360 4647 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.814701 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.892627 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.895777 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.895839 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.895862 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.895912 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.895983 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln9dx\" (UniqueName: \"kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.897824 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.897927 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.897960 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc 
kubenswrapper[4647]: I1128 15:43:54.898017 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd8k5\" (UniqueName: \"kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.898041 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.898155 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.898235 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.899580 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.921723 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.924731 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.931130 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.938197 4647 scope.go:117] "RemoveContainer" containerID="64cbf551a77b35e8219e93268918ff9f69138aac8be2f00b220770a780108538" Nov 28 15:43:54 crc kubenswrapper[4647]: I1128 15:43:54.974159 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 
15:43:54.994834 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln9dx\" (UniqueName: \"kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx\") pod \"cinder-scheduler-0\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " pod="openstack/cinder-scheduler-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:54.994948 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"] Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:54.997846 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678f98b656-ftpzl" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003264 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003339 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003426 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd8k5\" (UniqueName: \"kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003462 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003547 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.003565 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.029031 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-2xxj2"] Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.034233 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 
28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.038998 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.061146 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.061971 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.068156 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.074454 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd8k5\" (UniqueName: \"kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5\") pod \"dnsmasq-dns-6bb4fc677f-scbst\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.093324 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.104996 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config\") pod \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.105072 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs\") pod \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.105107 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle\") pod \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.105309 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config\") pod \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.105349 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kthb\" (UniqueName: \"kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb\") pod \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\" (UID: \"51f88ecc-6eae-4026-bb05-ec69c3bc65ee\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.132841 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "51f88ecc-6eae-4026-bb05-ec69c3bc65ee" (UID: "51f88ecc-6eae-4026-bb05-ec69c3bc65ee"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.166828 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb" (OuterVolumeSpecName: "kube-api-access-8kthb") pod "51f88ecc-6eae-4026-bb05-ec69c3bc65ee" (UID: "51f88ecc-6eae-4026-bb05-ec69c3bc65ee"). InnerVolumeSpecName "kube-api-access-8kthb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.170066 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:43:55 crc kubenswrapper[4647]: E1128 15:43:55.170504 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-api" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.170521 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-api" Nov 28 15:43:55 crc kubenswrapper[4647]: E1128 15:43:55.170531 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-httpd" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.170537 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-httpd" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.170699 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-httpd" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.170717 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" containerName="neutron-api" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.181063 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.191934 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210583 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210631 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210651 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210690 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210738 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " 
pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210806 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210887 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvw89\" (UniqueName: \"kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.210993 4647 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.211005 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kthb\" (UniqueName: \"kubernetes.io/projected/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-kube-api-access-8kthb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.261722 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.294221 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330405 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvw89\" (UniqueName: \"kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330499 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330534 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330561 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330620 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330687 4647 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.330789 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.334689 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.334779 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.341103 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.348211 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.348947 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51f88ecc-6eae-4026-bb05-ec69c3bc65ee" (UID: "51f88ecc-6eae-4026-bb05-ec69c3bc65ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.349172 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "51f88ecc-6eae-4026-bb05-ec69c3bc65ee" (UID: "51f88ecc-6eae-4026-bb05-ec69c3bc65ee"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.349572 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.352277 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.355902 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvw89\" (UniqueName: \"kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89\") pod \"cinder-api-0\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.369691 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config" (OuterVolumeSpecName: "config") pod "51f88ecc-6eae-4026-bb05-ec69c3bc65ee" (UID: "51f88ecc-6eae-4026-bb05-ec69c3bc65ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.433026 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.433053 4647 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.433064 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51f88ecc-6eae-4026-bb05-ec69c3bc65ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.572530 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.688452 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.740320 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data\") pod \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.740386 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom\") pod \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.740733 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle\") pod \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.740792 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vg67r\" (UniqueName: \"kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r\") pod \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.740988 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs\") pod \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\" (UID: \"6c015d44-7f9b-482e-8f6d-6efba45aa6ea\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.744431 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs" (OuterVolumeSpecName: "logs") pod "6c015d44-7f9b-482e-8f6d-6efba45aa6ea" (UID: "6c015d44-7f9b-482e-8f6d-6efba45aa6ea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.763611 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6c015d44-7f9b-482e-8f6d-6efba45aa6ea" (UID: "6c015d44-7f9b-482e-8f6d-6efba45aa6ea"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.764967 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r" (OuterVolumeSpecName: "kube-api-access-vg67r") pod "6c015d44-7f9b-482e-8f6d-6efba45aa6ea" (UID: "6c015d44-7f9b-482e-8f6d-6efba45aa6ea"). InnerVolumeSpecName "kube-api-access-vg67r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.825613 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c015d44-7f9b-482e-8f6d-6efba45aa6ea" (UID: "6c015d44-7f9b-482e-8f6d-6efba45aa6ea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.836711 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.844262 4647 generic.go:334] "Generic (PLEG): container finished" podID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" containerID="7fe0c4ed14c9d4a69082f3a7b955a5c7329e2d444b6f1d6e93ff95a62234ba54" exitCode=2 Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.844330 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2","Type":"ContainerDied","Data":"7fe0c4ed14c9d4a69082f3a7b955a5c7329e2d444b6f1d6e93ff95a62234ba54"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.844363 4647 scope.go:117] "RemoveContainer" containerID="7fe0c4ed14c9d4a69082f3a7b955a5c7329e2d444b6f1d6e93ff95a62234ba54" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.845619 4647 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.845632 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.845641 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vg67r\" (UniqueName: \"kubernetes.io/projected/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-kube-api-access-vg67r\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.845652 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.865173 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678f98b656-ftpzl" event={"ID":"51f88ecc-6eae-4026-bb05-ec69c3bc65ee","Type":"ContainerDied","Data":"e7e058b0788eb4840c897ff9b317a410f7c28a2e2a3028d75cd74c345af2aa81"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.865308 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678f98b656-ftpzl" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.897749 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data" (OuterVolumeSpecName: "config-data") pod "6c015d44-7f9b-482e-8f6d-6efba45aa6ea" (UID: "6c015d44-7f9b-482e-8f6d-6efba45aa6ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.919609 4647 scope.go:117] "RemoveContainer" containerID="533eafe7adce88e49f74eb427938363d9b928e13c2700cbed87f0eab78c3a711" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.919665 4647 generic.go:334] "Generic (PLEG): container finished" podID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerID="983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500" exitCode=0 Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.919771 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9d6dbf854-hbzkr" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.919789 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerDied","Data":"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.919827 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9d6dbf854-hbzkr" event={"ID":"6c015d44-7f9b-482e-8f6d-6efba45aa6ea","Type":"ContainerDied","Data":"90d04a661d02ac7f9b2d9e760910f415a6317becdca9e0cac9d82599ec550aa6"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.952839 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.952902 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953109 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwg9m\" (UniqueName: \"kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953137 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953156 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953245 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953492 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle\") pod \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\" (UID: \"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2\") " Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.953997 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c015d44-7f9b-482e-8f6d-6efba45aa6ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.955483 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.959283 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6565667588-kf4hg" event={"ID":"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407","Type":"ContainerStarted","Data":"bf74693e22cec37078f0c93667eba2e47d50d05737043c8b5533a91584f442d5"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.959326 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6565667588-kf4hg" event={"ID":"cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407","Type":"ContainerStarted","Data":"7863ad2c2bfaf4f4a294a9bc6b964f3b4c381fd05ae8c6a786971760c2c56828"} Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.960457 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.961030 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6565667588-kf4hg" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.961115 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.969979 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m" (OuterVolumeSpecName: "kube-api-access-zwg9m") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "kube-api-access-zwg9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.980948 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts" (OuterVolumeSpecName: "scripts") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.986470 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:43:55 crc kubenswrapper[4647]: I1128 15:43:55.997267 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.008280 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data" (OuterVolumeSpecName: "config-data") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.048023 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-678f98b656-ftpzl"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.053586 4647 scope.go:117] "RemoveContainer" containerID="7563fe0020ecc9b1f60fa298d03bcd154ddcc5215012322fde02968b070b7939" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.056968 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.057000 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.057012 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwg9m\" (UniqueName: \"kubernetes.io/projected/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-kube-api-access-zwg9m\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.057024 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.057036 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.057046 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.063110 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-678f98b656-ftpzl"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.070775 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6565667588-kf4hg" podStartSLOduration=15.070742561 podStartE2EDuration="15.070742561s" podCreationTimestamp="2025-11-28 15:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:56.005257078 +0000 UTC m=+1165.852863519" watchObservedRunningTime="2025-11-28 15:43:56.070742561 +0000 UTC m=+1165.918348982" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.070825 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" (UID: "332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.128374 4647 scope.go:117] "RemoveContainer" containerID="983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.137701 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.158654 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.179890 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-9d6dbf854-hbzkr"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.200350 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.200775 4647 scope.go:117] "RemoveContainer" containerID="e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.354346 4647 scope.go:117] "RemoveContainer" containerID="983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500" Nov 28 15:43:56 crc kubenswrapper[4647]: E1128 15:43:56.361706 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500\": container with ID starting with 983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500 not found: ID does not exist" containerID="983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.361750 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500"} err="failed to get container status \"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500\": rpc error: code = NotFound desc = could not find container \"983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500\": container with ID starting with 983b429cde0096c2a6f031c3337a4a13b2fa63dc60b4f0cc50d8658ffeca2500 not found: ID does not exist" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.361774 4647 scope.go:117] "RemoveContainer" containerID="e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c" Nov 28 15:43:56 crc kubenswrapper[4647]: E1128 15:43:56.363498 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c\": container with ID starting with e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c not found: ID does not exist" containerID="e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.363529 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c"} err="failed to get container status \"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c\": rpc error: code = NotFound desc = could not find container \"e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c\": container with ID starting with 
e73e3b2308b7a433b8b3bbcb9d47793704ef4591be0bfff402fe6c00b1129e5c not found: ID does not exist" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.462094 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51f88ecc-6eae-4026-bb05-ec69c3bc65ee" path="/var/lib/kubelet/pods/51f88ecc-6eae-4026-bb05-ec69c3bc65ee/volumes" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.462828 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" path="/var/lib/kubelet/pods/6c015d44-7f9b-482e-8f6d-6efba45aa6ea/volumes" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.463370 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7b93065-ae7a-4916-ac48-a672dd1048cb" path="/var/lib/kubelet/pods/d7b93065-ae7a-4916-ac48-a672dd1048cb/volumes" Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.464562 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.989656 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerStarted","Data":"359250720d615d6f3bac504f234ccf3453188e95ebf9f68f0cbf3fc444bd53aa"} Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.996265 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerStarted","Data":"42f9bb9b64c68bc45dbac846ffb6e16429ca054cc627c0816f3d7e96d32cfaf6"} Nov 28 15:43:56 crc kubenswrapper[4647]: I1128 15:43:56.997924 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" event={"ID":"58346aae-6d36-4f76-9eb8-dc5b490c9346","Type":"ContainerStarted","Data":"70cc5fdbc688340e9460048ae75f8a334fbf9a8b3b2f428edf45c6e66ba64ee6"} Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.006803 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2","Type":"ContainerDied","Data":"b75fd2003aa723c41dd6c8fc8661836bb5b3a6ede97c2fadbe036bec2deff1aa"} Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.006964 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.083156 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.092671 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.119087 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:57 crc kubenswrapper[4647]: E1128 15:43:57.120038 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" containerName="sg-core" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120072 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" containerName="sg-core" Nov 28 15:43:57 crc kubenswrapper[4647]: E1128 15:43:57.120085 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120091 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api" Nov 28 15:43:57 crc kubenswrapper[4647]: E1128 15:43:57.120125 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api-log" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120131 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api-log" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120305 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120318 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c015d44-7f9b-482e-8f6d-6efba45aa6ea" containerName="barbican-api-log" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.120333 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" containerName="sg-core" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.121896 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.125449 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.125655 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.151047 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284003 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284369 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284397 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnzp4\" (UniqueName: \"kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284428 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284496 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284549 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.284563 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.386982 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387046 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387076 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnzp4\" (UniqueName: \"kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387101 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387462 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387640 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.387693 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.388296 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.388322 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.396477 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.397093 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.404310 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.405219 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.413567 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnzp4\" (UniqueName: \"kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4\") pod \"ceilometer-0\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.444280 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:57 crc kubenswrapper[4647]: I1128 15:43:57.958580 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:57 crc kubenswrapper[4647]: W1128 15:43:57.974683 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0748985_5b7e_430b_80f8_7b09e00f5f91.slice/crio-5f2d55c412d76be367074b464f8ce2beba325e3ffc61e738e74d51481ed601b4 WatchSource:0}: Error finding container 5f2d55c412d76be367074b464f8ce2beba325e3ffc61e738e74d51481ed601b4: Status 404 returned error can't find the container with id 5f2d55c412d76be367074b464f8ce2beba325e3ffc61e738e74d51481ed601b4 Nov 28 15:43:58 crc kubenswrapper[4647]: I1128 15:43:58.017782 4647 generic.go:334] "Generic (PLEG): container finished" podID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerID="4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2" exitCode=0 Nov 28 15:43:58 crc kubenswrapper[4647]: I1128 15:43:58.017889 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" event={"ID":"58346aae-6d36-4f76-9eb8-dc5b490c9346","Type":"ContainerDied","Data":"4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2"} Nov 28 15:43:58 crc kubenswrapper[4647]: I1128 15:43:58.020735 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerStarted","Data":"5f2d55c412d76be367074b464f8ce2beba325e3ffc61e738e74d51481ed601b4"} Nov 28 15:43:58 crc kubenswrapper[4647]: I1128 15:43:58.268460 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:43:58 crc kubenswrapper[4647]: I1128 15:43:58.410241 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2" path="/var/lib/kubelet/pods/332c2c0e-dd01-4aad-8bb6-d1b9ef056cc2/volumes" Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.037399 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerStarted","Data":"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5"} Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.040122 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerStarted","Data":"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9"} Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.044993 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" event={"ID":"58346aae-6d36-4f76-9eb8-dc5b490c9346","Type":"ContainerStarted","Data":"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4"} Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.047381 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.071850 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" podStartSLOduration=5.071819052 podStartE2EDuration="5.071819052s" podCreationTimestamp="2025-11-28 15:43:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:59.069137871 +0000 UTC m=+1168.916744282" watchObservedRunningTime="2025-11-28 15:43:59.071819052 +0000 UTC m=+1168.919425473" Nov 28 15:43:59 crc kubenswrapper[4647]: I1128 15:43:59.511981 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.078716 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerStarted","Data":"f98af54805ac902d8e3ec0277313dc1c457e621a523973404955f66da3147065"} Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.081237 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerStarted","Data":"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73"} Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.081630 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.082622 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api" containerID="cri-o://a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" gracePeriod=30 Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.082657 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api-log" containerID="cri-o://84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" gracePeriod=30 Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.088109 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerStarted","Data":"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036"} Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.108999 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.1089781 podStartE2EDuration="5.1089781s" podCreationTimestamp="2025-11-28 15:43:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 15:44:00.106837804 +0000 UTC m=+1169.954444225" watchObservedRunningTime="2025-11-28 15:44:00.1089781 +0000 UTC m=+1169.956584521" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.910353 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980573 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980633 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980669 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvw89\" (UniqueName: \"kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980702 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980734 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980772 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.980838 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data\") pod \"24b1a779-475b-428e-968f-6a68329a10ee\" (UID: \"24b1a779-475b-428e-968f-6a68329a10ee\") " Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.982326 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.982712 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs" (OuterVolumeSpecName: "logs") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.986686 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts" (OuterVolumeSpecName: "scripts") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.991165 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89" (OuterVolumeSpecName: "kube-api-access-mvw89") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "kube-api-access-mvw89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:00 crc kubenswrapper[4647]: I1128 15:44:00.999098 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.029583 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.060886 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data" (OuterVolumeSpecName: "config-data") pod "24b1a779-475b-428e-968f-6a68329a10ee" (UID: "24b1a779-475b-428e-968f-6a68329a10ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082139 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082169 4647 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24b1a779-475b-428e-968f-6a68329a10ee-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082179 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvw89\" (UniqueName: \"kubernetes.io/projected/24b1a779-475b-428e-968f-6a68329a10ee-kube-api-access-mvw89\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082190 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24b1a779-475b-428e-968f-6a68329a10ee-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082216 4647 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082227 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.082234 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24b1a779-475b-428e-968f-6a68329a10ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.097934 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerStarted","Data":"01e74c52fb46be2f25b6e276b0c1ddd8d7c230b767afc82794a56206be4802c8"} Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.101781 4647 generic.go:334] "Generic (PLEG): container finished" podID="24b1a779-475b-428e-968f-6a68329a10ee" containerID="a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" exitCode=0 Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.101824 4647 generic.go:334] "Generic (PLEG): container finished" podID="24b1a779-475b-428e-968f-6a68329a10ee" containerID="84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" exitCode=143 Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.101884 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.101933 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerDied","Data":"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73"} Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.102003 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerDied","Data":"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5"} Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.102021 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24b1a779-475b-428e-968f-6a68329a10ee","Type":"ContainerDied","Data":"42f9bb9b64c68bc45dbac846ffb6e16429ca054cc627c0816f3d7e96d32cfaf6"} Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.102047 4647 scope.go:117] "RemoveContainer" containerID="a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.106159 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerStarted","Data":"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e"} Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.122205 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.5370835419999995 podStartE2EDuration="7.122181627s" podCreationTimestamp="2025-11-28 15:43:54 +0000 UTC" firstStartedPulling="2025-11-28 15:43:56.053842476 +0000 UTC m=+1165.901448897" lastFinishedPulling="2025-11-28 15:43:58.638940551 +0000 UTC m=+1168.486546982" observedRunningTime="2025-11-28 15:44:01.120450821 +0000 UTC m=+1170.968057242" watchObservedRunningTime="2025-11-28 15:44:01.122181627 +0000 UTC m=+1170.969788048" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.137051 4647 scope.go:117] "RemoveContainer" containerID="84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.159111 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.171651 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.192754 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:44:01 crc kubenswrapper[4647]: E1128 15:44:01.193240 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.193259 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api" Nov 28 15:44:01 crc kubenswrapper[4647]: E1128 15:44:01.193280 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api-log" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.193286 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api-log" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.193486 4647 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.193513 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="24b1a779-475b-428e-968f-6a68329a10ee" containerName="cinder-api-log" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.194512 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.203197 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.203559 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.204147 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.241749 4647 scope.go:117] "RemoveContainer" containerID="a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" Nov 28 15:44:01 crc kubenswrapper[4647]: E1128 15:44:01.242325 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73\": container with ID starting with a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73 not found: ID does not exist" containerID="a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.242362 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73"} err="failed to get container status \"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73\": rpc error: code = NotFound desc = could not find container \"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73\": container with ID starting with a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73 not found: ID does not exist" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.244688 4647 scope.go:117] "RemoveContainer" containerID="84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" Nov 28 15:44:01 crc kubenswrapper[4647]: E1128 15:44:01.250093 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5\": container with ID starting with 84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5 not found: ID does not exist" containerID="84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.250163 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5"} err="failed to get container status \"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5\": rpc error: code = NotFound desc = could not find container \"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5\": container with ID starting with 84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5 not found: ID does not exist" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.250198 4647 scope.go:117] "RemoveContainer" 
containerID="a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.253766 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73"} err="failed to get container status \"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73\": rpc error: code = NotFound desc = could not find container \"a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73\": container with ID starting with a967998c23783f8fb75e2824c77e5c8f12116d5e4ad2cb70e7ab6e5f9d3e5d73 not found: ID does not exist" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.253810 4647 scope.go:117] "RemoveContainer" containerID="84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.276171 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5"} err="failed to get container status \"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5\": rpc error: code = NotFound desc = could not find container \"84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5\": container with ID starting with 84439a23892ab09b3e243e392bc3b76adeb1a90aac15f662daa184394f43c0f5 not found: ID does not exist" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.287996 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-scripts\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288290 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk89x\" (UniqueName: \"kubernetes.io/projected/0f64718e-70bf-4d38-8c02-0523053f5e99-kube-api-access-qk89x\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288399 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288526 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f64718e-70bf-4d38-8c02-0523053f5e99-logs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288610 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data-custom\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288706 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288860 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f64718e-70bf-4d38-8c02-0523053f5e99-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.288963 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.289056 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.308090 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391123 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data-custom\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391193 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391272 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f64718e-70bf-4d38-8c02-0523053f5e99-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391298 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391337 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391381 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-scripts\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " 
pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391430 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk89x\" (UniqueName: \"kubernetes.io/projected/0f64718e-70bf-4d38-8c02-0523053f5e99-kube-api-access-qk89x\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391480 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f64718e-70bf-4d38-8c02-0523053f5e99-logs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.391553 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f64718e-70bf-4d38-8c02-0523053f5e99-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.393304 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f64718e-70bf-4d38-8c02-0523053f5e99-logs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.401878 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-scripts\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.402053 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.402343 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.402911 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-public-tls-certs\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.409278 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data-custom\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc 
kubenswrapper[4647]: I1128 15:44:01.413969 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f64718e-70bf-4d38-8c02-0523053f5e99-config-data\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.416160 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk89x\" (UniqueName: \"kubernetes.io/projected/0f64718e-70bf-4d38-8c02-0523053f5e99-kube-api-access-qk89x\") pod \"cinder-api-0\" (UID: \"0f64718e-70bf-4d38-8c02-0523053f5e99\") " pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.531442 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:44:01 crc kubenswrapper[4647]: I1128 15:44:01.963206 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:44:02 crc kubenswrapper[4647]: I1128 15:44:02.057718 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:44:02 crc kubenswrapper[4647]: I1128 15:44:02.121528 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0f64718e-70bf-4d38-8c02-0523053f5e99","Type":"ContainerStarted","Data":"aecc9a2b2c66808cb37421fc6be71374053dcfdf9dcbb7ad08f18d5036c42b25"} Nov 28 15:44:02 crc kubenswrapper[4647]: I1128 15:44:02.407275 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24b1a779-475b-428e-968f-6a68329a10ee" path="/var/lib/kubelet/pods/24b1a779-475b-428e-968f-6a68329a10ee/volumes" Nov 28 15:44:03 crc kubenswrapper[4647]: I1128 15:44:03.150548 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerStarted","Data":"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a"} Nov 28 15:44:03 crc kubenswrapper[4647]: I1128 15:44:03.153044 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:44:03 crc kubenswrapper[4647]: I1128 15:44:03.157777 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0f64718e-70bf-4d38-8c02-0523053f5e99","Type":"ContainerStarted","Data":"1cfa8777732c0a741fceeb43ddc937a5fd33def417f180ce55d3c32421584664"} Nov 28 15:44:04 crc kubenswrapper[4647]: I1128 15:44:04.177526 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0f64718e-70bf-4d38-8c02-0523053f5e99","Type":"ContainerStarted","Data":"f86d89a4d41050dd3d72d8b85036d9141c86a1527bbb7a18258a3eae0a165723"} Nov 28 15:44:04 crc kubenswrapper[4647]: I1128 15:44:04.178583 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 15:44:04 crc kubenswrapper[4647]: I1128 15:44:04.201359 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.6801629829999998 podStartE2EDuration="7.201329562s" podCreationTimestamp="2025-11-28 15:43:57 +0000 UTC" firstStartedPulling="2025-11-28 15:43:57.976692294 +0000 UTC m=+1167.824298715" lastFinishedPulling="2025-11-28 15:44:02.497858873 +0000 UTC m=+1172.345465294" observedRunningTime="2025-11-28 15:44:03.18224175 +0000 UTC m=+1173.029848171" watchObservedRunningTime="2025-11-28 15:44:04.201329562 +0000 UTC 
m=+1174.048935993" Nov 28 15:44:04 crc kubenswrapper[4647]: I1128 15:44:04.201576 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.201570588 podStartE2EDuration="3.201570588s" podCreationTimestamp="2025-11-28 15:44:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:04.195999461 +0000 UTC m=+1174.043605892" watchObservedRunningTime="2025-11-28 15:44:04.201570588 +0000 UTC m=+1174.049177019" Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.093836 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.263510 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.353896 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"] Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.354175 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="dnsmasq-dns" containerID="cri-o://4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98" gracePeriod=10 Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.476306 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.551534 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:44:05 crc kubenswrapper[4647]: I1128 15:44:05.964107 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.032099 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.032234 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.032260 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.033296 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn5lv\" (UniqueName: \"kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.033378 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.033567 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc\") pod \"3568d112-eb07-4349-83bc-d42c2e5ec135\" (UID: \"3568d112-eb07-4349-83bc-d42c2e5ec135\") " Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.060293 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv" (OuterVolumeSpecName: "kube-api-access-cn5lv") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "kube-api-access-cn5lv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.137259 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn5lv\" (UniqueName: \"kubernetes.io/projected/3568d112-eb07-4349-83bc-d42c2e5ec135-kube-api-access-cn5lv\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.154779 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.165298 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.172875 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config" (OuterVolumeSpecName: "config") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.181258 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.205052 4647 generic.go:334] "Generic (PLEG): container finished" podID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerID="4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98" exitCode=0 Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.205328 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="cinder-scheduler" containerID="cri-o://f98af54805ac902d8e3ec0277313dc1c457e621a523973404955f66da3147065" gracePeriod=30 Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.205829 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="probe" containerID="cri-o://01e74c52fb46be2f25b6e276b0c1ddd8d7c230b767afc82794a56206be4802c8" gracePeriod=30 Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.206000 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.208661 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" event={"ID":"3568d112-eb07-4349-83bc-d42c2e5ec135","Type":"ContainerDied","Data":"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"}
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.208746 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-wbs2b" event={"ID":"3568d112-eb07-4349-83bc-d42c2e5ec135","Type":"ContainerDied","Data":"3835cacb51f83b075dbde9ef395b8ecbb6ee63292443dc22ef50ee935f5f4b2e"}
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.208773 4647 scope.go:117] "RemoveContainer" containerID="4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.220507 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3568d112-eb07-4349-83bc-d42c2e5ec135" (UID: "3568d112-eb07-4349-83bc-d42c2e5ec135"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.244174 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.244204 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.244215 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.244226 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.244238 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3568d112-eb07-4349-83bc-d42c2e5ec135-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.255769 4647 scope.go:117] "RemoveContainer" containerID="9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.283458 4647 scope.go:117] "RemoveContainer" containerID="4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"
Nov 28 15:44:06 crc kubenswrapper[4647]: E1128 15:44:06.283819 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98\": container with ID starting with 4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98 not found: ID does not exist" containerID="4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.283849 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98"} err="failed to get container status \"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98\": rpc error: code = NotFound desc = could not find container \"4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98\": container with ID starting with 4f8c71fe5e162c93645e2ca2b21bac3189f39df22603168bd87de95ce3977b98 not found: ID does not exist"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.283869 4647 scope.go:117] "RemoveContainer" containerID="9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf"
Nov 28 15:44:06 crc kubenswrapper[4647]: E1128 15:44:06.284070 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf\": container with ID starting with 9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf not found: ID does not exist" containerID="9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.284086 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf"} err="failed to get container status \"9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf\": rpc error: code = NotFound desc = could not find container \"9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf\": container with ID starting with 9de305937ac27dc2ac0ba537c9f4fc2ead8789c50946083016b92ef9bc662acf not found: ID does not exist"
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.550614 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"]
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.560097 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-wbs2b"]
Nov 28 15:44:06 crc kubenswrapper[4647]: I1128 15:44:06.796127 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-8567d4b5-5ss7w"
Nov 28 15:44:07 crc kubenswrapper[4647]: I1128 15:44:07.222257 4647 generic.go:334] "Generic (PLEG): container finished" podID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerID="01e74c52fb46be2f25b6e276b0c1ddd8d7c230b767afc82794a56206be4802c8" exitCode=0
Nov 28 15:44:07 crc kubenswrapper[4647]: I1128 15:44:07.222333 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerDied","Data":"01e74c52fb46be2f25b6e276b0c1ddd8d7c230b767afc82794a56206be4802c8"}
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.272912 4647 generic.go:334] "Generic (PLEG): container finished" podID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerID="f98af54805ac902d8e3ec0277313dc1c457e621a523973404955f66da3147065" exitCode=0
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.274147 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerDied","Data":"f98af54805ac902d8e3ec0277313dc1c457e621a523973404955f66da3147065"}
path="/var/lib/kubelet/pods/3568d112-eb07-4349-83bc-d42c2e5ec135/volumes" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.605519 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.715681 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.715784 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln9dx\" (UniqueName: \"kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.715810 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.716064 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.716092 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.716188 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts\") pod \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\" (UID: \"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698\") " Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.719314 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.733500 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx" (OuterVolumeSpecName: "kube-api-access-ln9dx") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "kube-api-access-ln9dx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.749567 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.751570 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts" (OuterVolumeSpecName: "scripts") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.799893 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.827918 4647 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.827950 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.827961 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln9dx\" (UniqueName: \"kubernetes.io/projected/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-kube-api-access-ln9dx\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.827971 4647 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.827979 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.880590 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data" (OuterVolumeSpecName: "config-data") pod "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" (UID: "c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.881881 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 15:44:08 crc kubenswrapper[4647]: E1128 15:44:08.882468 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="init" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882498 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="init" Nov 28 15:44:08 crc kubenswrapper[4647]: E1128 15:44:08.882530 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="dnsmasq-dns" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882537 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="dnsmasq-dns" Nov 28 15:44:08 crc kubenswrapper[4647]: E1128 15:44:08.882569 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="probe" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882575 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="probe" Nov 28 15:44:08 crc kubenswrapper[4647]: E1128 15:44:08.882595 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="cinder-scheduler" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882603 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="cinder-scheduler" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882797 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="probe" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882844 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" containerName="cinder-scheduler" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.882862 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3568d112-eb07-4349-83bc-d42c2e5ec135" containerName="dnsmasq-dns" Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.884114 4647 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.884114 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.887901 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.888042 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.888127 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-gkhjl"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.900470 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.929965 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.930039 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config-secret\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.930127 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.930162 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgkt7\" (UniqueName: \"kubernetes.io/projected/c765a2ba-ed3c-471b-8794-1623c126f0f2-kube-api-access-hgkt7\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:08 crc kubenswrapper[4647]: I1128 15:44:08.930245 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.031315 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgkt7\" (UniqueName: \"kubernetes.io/projected/c765a2ba-ed3c-471b-8794-1623c126f0f2-kube-api-access-hgkt7\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.031433 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.031473 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config-secret\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.031545 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.032487 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.036644 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.037831 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c765a2ba-ed3c-471b-8794-1623c126f0f2-openstack-config-secret\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.076097 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgkt7\" (UniqueName: \"kubernetes.io/projected/c765a2ba-ed3c-471b-8794-1623c126f0f2-kube-api-access-hgkt7\") pod \"openstackclient\" (UID: \"c765a2ba-ed3c-471b-8794-1623c126f0f2\") " pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.226803 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.296884 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698","Type":"ContainerDied","Data":"359250720d615d6f3bac504f234ccf3453188e95ebf9f68f0cbf3fc444bd53aa"}
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.296952 4647 scope.go:117] "RemoveContainer" containerID="01e74c52fb46be2f25b6e276b0c1ddd8d7c230b767afc82794a56206be4802c8"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.296966 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.360608 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.390644 4647 scope.go:117] "RemoveContainer" containerID="f98af54805ac902d8e3ec0277313dc1c457e621a523973404955f66da3147065"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.408550 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.460827 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.475470 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.499776 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.519787 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546249 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546313 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546346 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546368 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q495s\" (UniqueName: \"kubernetes.io/projected/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-kube-api-access-q495s\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.546470 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648338 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648496 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648566 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648614 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648654 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.648693 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q495s\" (UniqueName: \"kubernetes.io/projected/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-kube-api-access-q495s\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.649850 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.657147 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.657508 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.659958 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.662205 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.674325 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q495s\" (UniqueName: \"kubernetes.io/projected/a6eb59e0-a2aa-49d0-a662-8e37f51004ef-kube-api-access-q495s\") pod \"cinder-scheduler-0\" (UID: \"a6eb59e0-a2aa-49d0-a662-8e37f51004ef\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.801793 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:44:09 crc kubenswrapper[4647]: I1128 15:44:09.962368 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 15:44:09 crc kubenswrapper[4647]: W1128 15:44:09.972303 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc765a2ba_ed3c_471b_8794_1623c126f0f2.slice/crio-83e06e6eb23ed134cdeb641af0c5d4f1296496d992aa8c816cc00c6328680405 WatchSource:0}: Error finding container 83e06e6eb23ed134cdeb641af0c5d4f1296496d992aa8c816cc00c6328680405: Status 404 returned error can't find the container with id 83e06e6eb23ed134cdeb641af0c5d4f1296496d992aa8c816cc00c6328680405
Nov 28 15:44:10 crc kubenswrapper[4647]: I1128 15:44:10.311429 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c765a2ba-ed3c-471b-8794-1623c126f0f2","Type":"ContainerStarted","Data":"83e06e6eb23ed134cdeb641af0c5d4f1296496d992aa8c816cc00c6328680405"}
Nov 28 15:44:10 crc kubenswrapper[4647]: I1128 15:44:10.338268 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:44:10 crc kubenswrapper[4647]: I1128 15:44:10.470519 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698" path="/var/lib/kubelet/pods/c5d9b1ee-ccd6-44a9-ac32-a7c8c75fd698/volumes"
Nov 28 15:44:11 crc kubenswrapper[4647]: I1128 15:44:11.348323 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6eb59e0-a2aa-49d0-a662-8e37f51004ef","Type":"ContainerStarted","Data":"177e9394298317d99b496a03d6d312255522f318880bd3b8ef14259c1331d287"}
Nov 28 15:44:11 crc kubenswrapper[4647]: I1128 15:44:11.350029 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6eb59e0-a2aa-49d0-a662-8e37f51004ef","Type":"ContainerStarted","Data":"241dcc233941dd1cde8f1a5c6c3aff8a4a50ed2b74a14b0c35495178bd72a12e"}
Nov 28 15:44:12 crc kubenswrapper[4647]: I1128 15:44:12.375546 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6eb59e0-a2aa-49d0-a662-8e37f51004ef","Type":"ContainerStarted","Data":"54f4f98126fb541cef82d5031ed1afc808dcc1d3ed0359710faaf5fca93ead98"}
Nov 28 15:44:12 crc kubenswrapper[4647]: I1128 15:44:12.399322 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.399298086 podStartE2EDuration="3.399298086s" podCreationTimestamp="2025-11-28 15:44:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:12.397517689 +0000 UTC m=+1182.245124110" watchObservedRunningTime="2025-11-28 15:44:12.399298086 +0000 UTC m=+1182.246904507"
Nov 28 15:44:14 crc kubenswrapper[4647]: I1128 15:44:14.647817 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6565667588-kf4hg"
Nov 28 15:44:14 crc kubenswrapper[4647]: I1128 15:44:14.698551 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6565667588-kf4hg"
Nov 28 15:44:14 crc kubenswrapper[4647]: I1128 15:44:14.802678 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 15:44:14 crc kubenswrapper[4647]: I1128 15:44:14.889377 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.172827 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6f577f58dc-7rp75"]
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.181071 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.197664 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.197831 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.198034 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.213401 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6f577f58dc-7rp75"]
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305735 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-internal-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305796 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-log-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305833 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-combined-ca-bundle\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305884 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-config-data\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305912 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6twx\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-kube-api-access-w6twx\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.305967 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-etc-swift\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.306010 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-run-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.306041 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-public-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.407341 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6twx\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-kube-api-access-w6twx\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.407406 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-etc-swift\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.407457 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-run-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.407485 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-public-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.407540 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-internal-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.412109 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-log-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.412195 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-combined-ca-bundle\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.412709 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-config-data\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.413097 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-run-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.422664 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5168b52-c295-45d6-aa36-932b5bb95a97-log-httpd\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.454840 4647 generic.go:334] "Generic (PLEG): container finished" podID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerID="420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66" exitCode=137
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.454962 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerDied","Data":"420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66"}
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.455073 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerStarted","Data":"3c18b91e7b1439f8227b948f2281e3ee534d373728f5845fa589469f1c989899"}
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.465403 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-etc-swift\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.465943 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-config-data\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.466552 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-public-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.467299 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-combined-ca-bundle\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.471813 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5168b52-c295-45d6-aa36-932b5bb95a97-internal-tls-certs\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.475282 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6twx\" (UniqueName: \"kubernetes.io/projected/c5168b52-c295-45d6-aa36-932b5bb95a97-kube-api-access-w6twx\") pod \"swift-proxy-6f577f58dc-7rp75\" (UID: \"c5168b52-c295-45d6-aa36-932b5bb95a97\") " pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:16 crc kubenswrapper[4647]: I1128 15:44:16.504728 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6f577f58dc-7rp75"
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.319362 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6f577f58dc-7rp75"]
Nov 28 15:44:17 crc kubenswrapper[4647]: W1128 15:44:17.331213 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5168b52_c295_45d6_aa36_932b5bb95a97.slice/crio-841799e37087960c7373c8510465921fae4580bdf267281ad00870b7648c4cb1 WatchSource:0}: Error finding container 841799e37087960c7373c8510465921fae4580bdf267281ad00870b7648c4cb1: Status 404 returned error can't find the container with id 841799e37087960c7373c8510465921fae4580bdf267281ad00870b7648c4cb1
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.474869 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f577f58dc-7rp75" event={"ID":"c5168b52-c295-45d6-aa36-932b5bb95a97","Type":"ContainerStarted","Data":"841799e37087960c7373c8510465921fae4580bdf267281ad00870b7648c4cb1"}
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.776269 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.777131 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-central-agent" containerID="cri-o://a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9" gracePeriod=30
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.777246 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="sg-core" containerID="cri-o://177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e" gracePeriod=30
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.777343 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-notification-agent" containerID="cri-o://7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036" gracePeriod=30
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.777487 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="proxy-httpd" containerID="cri-o://be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a" gracePeriod=30
Nov 28 15:44:17 crc kubenswrapper[4647]: I1128 15:44:17.805006 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.164:3000/\": EOF"
Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.490546 4647 generic.go:334] "Generic (PLEG): container finished" podID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerID="be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a" exitCode=0
Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.491058 4647 generic.go:334] "Generic (PLEG): container finished" podID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerID="177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e" exitCode=2
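[editor's note] "Killing container with a grace period ... gracePeriod=30" is the kubelet's two-phase stop: the runtime first delivers SIGTERM, and only if the process is still alive when the grace period lapses does it escalate to SIGKILL; the readiness probe failing with EOF right afterwards is the expected side effect of the HTTP endpoint dying first. A minimal process-level sketch of the same pattern, assuming a plain os/exec child on a Unix system rather than a CRI container:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace sends SIGTERM, waits up to grace for a clean exit, then
// escalates to a hard kill: the shape behind the "Killing container with
// a grace period" entries in the log above.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	_ = cmd.Process.Signal(syscall.SIGTERM)

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace expired: force kill (SIGKILL)
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "300")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	err := stopWithGrace(cmd, 2*time.Second)
	fmt.Println("stopped:", err) // "signal: terminated" when TERM suffices
}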
finished" podID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerID="a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9" exitCode=0 Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.490706 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerDied","Data":"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a"} Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.491153 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerDied","Data":"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e"} Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.491168 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerDied","Data":"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9"} Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.497267 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f577f58dc-7rp75" event={"ID":"c5168b52-c295-45d6-aa36-932b5bb95a97","Type":"ContainerStarted","Data":"ea4902b23d888b7284b6767045a21ddb5cca99d49ba306ea8c55f72465085a8a"} Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.497300 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f577f58dc-7rp75" event={"ID":"c5168b52-c295-45d6-aa36-932b5bb95a97","Type":"ContainerStarted","Data":"049a38076f121c6787b02c24e25b6988b97be97151437a99b9b692c351edb486"} Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.497957 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6f577f58dc-7rp75" Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.498076 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6f577f58dc-7rp75" Nov 28 15:44:18 crc kubenswrapper[4647]: I1128 15:44:18.529533 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6f577f58dc-7rp75" podStartSLOduration=2.529501653 podStartE2EDuration="2.529501653s" podCreationTimestamp="2025-11-28 15:44:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:18.521160942 +0000 UTC m=+1188.368767363" watchObservedRunningTime="2025-11-28 15:44:18.529501653 +0000 UTC m=+1188.377108074" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.478358 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509723 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509783 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509854 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509889 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnzp4\" (UniqueName: \"kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509919 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.509984 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.510090 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd\") pod \"e0748985-5b7e-430b-80f8-7b09e00f5f91\" (UID: \"e0748985-5b7e-430b-80f8-7b09e00f5f91\") " Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.512219 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.523274 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "log-httpd". 
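[editor's note] The ceilometer volume teardown in progress here follows the same three-step ordering as the cinder-scheduler teardown earlier: "UnmountVolume started", then "UnmountVolume.TearDown succeeded", and only then "Volume detached ... DevicePath \"\"" once the reconciler updates its actual-state cache. A toy version of that ordering; the TearDowner interface and actualState struct are illustrative stand-ins for the volume plugin and the reconciler's actual state of world:

package main

import (
	"fmt"
	"sync"
)

// TearDowner is a hypothetical stand-in for a kubelet volume plugin.
type TearDowner interface {
	TearDown(volume string) error
}

// actualState mimics the reconciler's actual-state-of-world cache.
type actualState struct {
	mu      sync.Mutex
	mounted map[string]bool
}

// unmountVolume mirrors the log's ordering: run the plugin's TearDown
// first, and only on success record the volume as detached.
func (s *actualState) unmountVolume(p TearDowner, volume string) error {
	fmt.Println("UnmountVolume started for volume", volume)
	if err := p.TearDown(volume); err != nil {
		return err // still mounted; retried on the next reconcile pass
	}
	fmt.Println("UnmountVolume.TearDown succeeded for volume", volume)

	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.mounted, volume)
	fmt.Println("Volume detached for volume", volume)
	return nil
}

type noopPlugin struct{}

func (noopPlugin) TearDown(string) error { return nil }

func main() {
	s := &actualState{mounted: map[string]bool{"run-httpd": true}}
	_ = s.unmountVolume(noopPlugin{}, "run-httpd")
}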
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.552115 4647 generic.go:334] "Generic (PLEG): container finished" podID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerID="7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036" exitCode=0 Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.553059 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.553584 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerDied","Data":"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036"} Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.553612 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e0748985-5b7e-430b-80f8-7b09e00f5f91","Type":"ContainerDied","Data":"5f2d55c412d76be367074b464f8ce2beba325e3ffc61e738e74d51481ed601b4"} Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.553630 4647 scope.go:117] "RemoveContainer" containerID="be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.573507 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4" (OuterVolumeSpecName: "kube-api-access-wnzp4") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "kube-api-access-wnzp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.584439 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts" (OuterVolumeSpecName: "scripts") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.612920 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.612955 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.612964 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnzp4\" (UniqueName: \"kubernetes.io/projected/e0748985-5b7e-430b-80f8-7b09e00f5f91-kube-api-access-wnzp4\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.612975 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0748985-5b7e-430b-80f8-7b09e00f5f91-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.676679 4647 scope.go:117] "RemoveContainer" containerID="177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.717543 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.722162 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.811897 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.826045 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.829108 4647 scope.go:117] "RemoveContainer" containerID="7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.842868 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data" (OuterVolumeSpecName: "config-data") pod "e0748985-5b7e-430b-80f8-7b09e00f5f91" (UID: "e0748985-5b7e-430b-80f8-7b09e00f5f91"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.863325 4647 scope.go:117] "RemoveContainer" containerID="a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.895528 4647 scope.go:117] "RemoveContainer" containerID="be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.896058 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a\": container with ID starting with be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a not found: ID does not exist" containerID="be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.896093 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a"} err="failed to get container status \"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a\": rpc error: code = NotFound desc = could not find container \"be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a\": container with ID starting with be328e8c5bb52aa92d00e97dc3714dbf68d602aac45143f98f12cd0ce95fef2a not found: ID does not exist" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.896120 4647 scope.go:117] "RemoveContainer" containerID="177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.896531 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e\": container with ID starting with 177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e not found: ID does not exist" containerID="177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.896554 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e"} err="failed to get container status \"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e\": rpc error: code = NotFound desc = could not find container \"177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e\": container with ID starting with 177152cd15384a71442dc8a3c226628cfd4b3c33aa5fdd12596a6f85ee44e17e not found: ID does not exist" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.896571 4647 scope.go:117] "RemoveContainer" containerID="7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.897401 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036\": container with ID starting with 7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036 not found: ID does not exist" containerID="7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.897471 4647 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036"} err="failed to get container status \"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036\": rpc error: code = NotFound desc = could not find container \"7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036\": container with ID starting with 7cf3cd211176219aa116d9bad11b3084eca9bd98ccf68fdc348f98778f5ae036 not found: ID does not exist" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.897501 4647 scope.go:117] "RemoveContainer" containerID="a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.897827 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9\": container with ID starting with a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9 not found: ID does not exist" containerID="a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.897861 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9"} err="failed to get container status \"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9\": rpc error: code = NotFound desc = could not find container \"a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9\": container with ID starting with a80f647f41edb6e937e8c1c40e06cc8721425e3574af15e2f0a7083ae14bd8a9 not found: ID does not exist" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.913821 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.924290 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.929080 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0748985-5b7e-430b-80f8-7b09e00f5f91-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.951753 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.952277 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-notification-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952302 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-notification-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.952344 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-central-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952356 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-central-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.952375 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="proxy-httpd" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952382 4647 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="proxy-httpd" Nov 28 15:44:19 crc kubenswrapper[4647]: E1128 15:44:19.952394 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="sg-core" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952401 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="sg-core" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952667 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-notification-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952693 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="ceilometer-central-agent" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952709 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="sg-core" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.952729 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" containerName="proxy-httpd" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.955638 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.963777 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.963968 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:44:19 crc kubenswrapper[4647]: I1128 15:44:19.969593 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.030824 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.030888 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.030913 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.030934 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.030959 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-96rt8\" (UniqueName: \"kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.031003 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.031112 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133781 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133824 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133847 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133867 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133896 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96rt8\" (UniqueName: \"kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.133932 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.136309 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.140952 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.141799 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.142034 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.142117 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.149306 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.161256 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96rt8\" (UniqueName: \"kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8\") pod \"ceilometer-0\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.210174 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.288222 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:20 crc kubenswrapper[4647]: I1128 15:44:20.448880 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0748985-5b7e-430b-80f8-7b09e00f5f91" path="/var/lib/kubelet/pods/e0748985-5b7e-430b-80f8-7b09e00f5f91/volumes" Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.005872 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.154084 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.154715 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-log" containerID="cri-o://bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c" gracePeriod=30 Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.154950 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-httpd" containerID="cri-o://2d73c415a9268be2172ddccb53651d0ba7ee43385a1415a1a5f80af0b497103e" gracePeriod=30 Nov 28 15:44:21 crc kubenswrapper[4647]: E1128 15:44:21.364679 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a15d5b4_a376_4f2e_bbf2_e04474864d2c.slice/crio-bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.616822 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.624120 4647 generic.go:334] "Generic (PLEG): container finished" podID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerID="bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c" exitCode=143 Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.624211 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerDied","Data":"bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c"} Nov 28 15:44:21 crc kubenswrapper[4647]: I1128 15:44:21.625647 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerStarted","Data":"cea1d03a5a62a9710b50eb9c1a42d744de90f3ac6b54b40c8b71dc6a37bad7d0"} Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.619627 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-7tcfv"] Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.620681 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.688774 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-7tcfv"] Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.710229 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcssp\" (UniqueName: \"kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp\") pod \"nova-api-db-create-7tcfv\" (UID: \"9c2c650d-ce2e-4087-bb19-f199ed233bcf\") " pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.763875 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-6v9rt"] Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.765640 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.813437 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ghq\" (UniqueName: \"kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq\") pod \"nova-cell0-db-create-6v9rt\" (UID: \"4148fe73-7269-47f4-9c12-60ec17192441\") " pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.813622 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcssp\" (UniqueName: \"kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp\") pod \"nova-api-db-create-7tcfv\" (UID: \"9c2c650d-ce2e-4087-bb19-f199ed233bcf\") " pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.849880 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcssp\" (UniqueName: \"kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp\") pod \"nova-api-db-create-7tcfv\" (UID: \"9c2c650d-ce2e-4087-bb19-f199ed233bcf\") " pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.863485 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6v9rt"] Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.928651 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66ghq\" (UniqueName: \"kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq\") pod \"nova-cell0-db-create-6v9rt\" (UID: \"4148fe73-7269-47f4-9c12-60ec17192441\") " pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.942494 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.987328 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-bkg6q"] Nov 28 15:44:22 crc kubenswrapper[4647]: I1128 15:44:22.990836 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.017153 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ghq\" (UniqueName: \"kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq\") pod \"nova-cell0-db-create-6v9rt\" (UID: \"4148fe73-7269-47f4-9c12-60ec17192441\") " pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.042745 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bkg6q"] Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.129653 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.134477 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ms65\" (UniqueName: \"kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65\") pod \"nova-cell1-db-create-bkg6q\" (UID: \"63e70ac4-fcb7-4bce-a0b2-5c148e71514f\") " pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.236672 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ms65\" (UniqueName: \"kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65\") pod \"nova-cell1-db-create-bkg6q\" (UID: \"63e70ac4-fcb7-4bce-a0b2-5c148e71514f\") " pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.265407 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ms65\" (UniqueName: \"kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65\") pod \"nova-cell1-db-create-bkg6q\" (UID: \"63e70ac4-fcb7-4bce-a0b2-5c148e71514f\") " pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:23 crc kubenswrapper[4647]: I1128 15:44:23.382032 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:24 crc kubenswrapper[4647]: I1128 15:44:24.603728 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.152:9292/healthcheck\": read tcp 10.217.0.2:51598->10.217.0.152:9292: read: connection reset by peer" Nov 28 15:44:24 crc kubenswrapper[4647]: I1128 15:44:24.604195 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.152:9292/healthcheck\": read tcp 10.217.0.2:51596->10.217.0.152:9292: read: connection reset by peer" Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.426051 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.427114 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.429954 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.679389 4647 generic.go:334] "Generic (PLEG): container finished" podID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerID="2d73c415a9268be2172ddccb53651d0ba7ee43385a1415a1a5f80af0b497103e" exitCode=0 Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.679479 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerDied","Data":"2d73c415a9268be2172ddccb53651d0ba7ee43385a1415a1a5f80af0b497103e"} Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.811286 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.811557 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" containerID="cri-o://dfe5f51562f95457637fa84764fa1b2996ec3b1fca9e5ebbb693c1255ea1f996" gracePeriod=30 Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.811693 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-httpd" containerID="cri-o://fb47b2636e4ccbf78e445df7f2965c002d36fc7be4b00ba47ce4f5787482f287" gracePeriod=30 Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.826896 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/glance-default-external-api-0" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.153:9292/healthcheck\": EOF" Nov 28 15:44:25 crc kubenswrapper[4647]: I1128 15:44:25.829876 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" 
podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.153:9292/healthcheck\": EOF" Nov 28 15:44:26 crc kubenswrapper[4647]: I1128 15:44:26.519254 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6f577f58dc-7rp75" Nov 28 15:44:26 crc kubenswrapper[4647]: I1128 15:44:26.520330 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6f577f58dc-7rp75" Nov 28 15:44:26 crc kubenswrapper[4647]: I1128 15:44:26.698903 4647 generic.go:334] "Generic (PLEG): container finished" podID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerID="dfe5f51562f95457637fa84764fa1b2996ec3b1fca9e5ebbb693c1255ea1f996" exitCode=143 Nov 28 15:44:26 crc kubenswrapper[4647]: I1128 15:44:26.699187 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerDied","Data":"dfe5f51562f95457637fa84764fa1b2996ec3b1fca9e5ebbb693c1255ea1f996"} Nov 28 15:44:29 crc kubenswrapper[4647]: I1128 15:44:29.733012 4647 generic.go:334] "Generic (PLEG): container finished" podID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerID="fb47b2636e4ccbf78e445df7f2965c002d36fc7be4b00ba47ce4f5787482f287" exitCode=0 Nov 28 15:44:29 crc kubenswrapper[4647]: I1128 15:44:29.733468 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerDied","Data":"fb47b2636e4ccbf78e445df7f2965c002d36fc7be4b00ba47ce4f5787482f287"} Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.733315 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.787036 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c765a2ba-ed3c-471b-8794-1623c126f0f2","Type":"ContainerStarted","Data":"a93d68f13e1ed2b949ebd9a5a1473a9151400d6ebc4c629ccf98518f0df7230b"} Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.810248 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2a15d5b4-a376-4f2e-bbf2-e04474864d2c","Type":"ContainerDied","Data":"ec4d014cde13e040ae693265e3bb2ebafc38ac1e4088f46d1ad18cc7f635a19f"} Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.810302 4647 scope.go:117] "RemoveContainer" containerID="2d73c415a9268be2172ddccb53651d0ba7ee43385a1415a1a5f80af0b497103e" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.810479 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.819720 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.53621139 podStartE2EDuration="22.819697494s" podCreationTimestamp="2025-11-28 15:44:08 +0000 UTC" firstStartedPulling="2025-11-28 15:44:09.975359685 +0000 UTC m=+1179.822966106" lastFinishedPulling="2025-11-28 15:44:30.258845789 +0000 UTC m=+1200.106452210" observedRunningTime="2025-11-28 15:44:30.813240833 +0000 UTC m=+1200.660847254" watchObservedRunningTime="2025-11-28 15:44:30.819697494 +0000 UTC m=+1200.667303915" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.840835 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.840921 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841012 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841048 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841098 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841241 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztnj5\" (UniqueName: \"kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841286 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.841351 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle\") pod \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\" (UID: \"2a15d5b4-a376-4f2e-bbf2-e04474864d2c\") " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.843873 4647 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.859140 4647 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.861789 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs" (OuterVolumeSpecName: "logs") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.869134 4647 scope.go:117] "RemoveContainer" containerID="bd41952dfd11436229ca57cb4d4d06268634c6154092aa193d62c541c3483f0c" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.883683 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts" (OuterVolumeSpecName: "scripts") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.887680 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5" (OuterVolumeSpecName: "kube-api-access-ztnj5") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "kube-api-access-ztnj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.889811 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.929683 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.960609 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.966956 4647 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.967000 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.967010 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.967020 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztnj5\" (UniqueName: \"kubernetes.io/projected/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-kube-api-access-ztnj5\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.967047 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.967057 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.988205 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data" (OuterVolumeSpecName: "config-data") pod "2a15d5b4-a376-4f2e-bbf2-e04474864d2c" (UID: "2a15d5b4-a376-4f2e-bbf2-e04474864d2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.988609 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 15:44:30 crc kubenswrapper[4647]: I1128 15:44:30.994001 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.070659 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.070690 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a15d5b4-a376-4f2e-bbf2-e04474864d2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.171639 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.171722 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.171867 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.171938 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172030 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172099 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsc9k\" (UniqueName: \"kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172143 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172177 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs\") pod \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\" (UID: \"4c49d4cb-49bc-40f4-b1e9-03c730876bdb\") " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172251 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run" 
(OuterVolumeSpecName: "httpd-run") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172745 4647 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.172985 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs" (OuterVolumeSpecName: "logs") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.181395 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.207280 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts" (OuterVolumeSpecName: "scripts") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.213091 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.272518 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.280019 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k" (OuterVolumeSpecName: "kube-api-access-tsc9k") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "kube-api-access-tsc9k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.290380 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.290472 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsc9k\" (UniqueName: \"kubernetes.io/projected/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-kube-api-access-tsc9k\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.290487 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.290513 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.309176 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.330482 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:31 crc kubenswrapper[4647]: E1128 15:44:31.331084 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331100 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: E1128 15:44:31.331115 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331123 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: E1128 15:44:31.331142 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331149 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: E1128 15:44:31.331190 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331196 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331426 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331450 4647 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331467 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-log" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.331476 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" containerName="glance-httpd" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.344118 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.352657 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.356163 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.357073 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.387497 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.397843 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bkg6q"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.413107 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.413147 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.452493 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-7tcfv"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.471953 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6v9rt"] Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.477645 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data" (OuterVolumeSpecName: "config-data") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.492155 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4c49d4cb-49bc-40f4-b1e9-03c730876bdb" (UID: "4c49d4cb-49bc-40f4-b1e9-03c730876bdb"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519000 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsh7f\" (UniqueName: \"kubernetes.io/projected/38dcdbad-1599-4387-8587-6676317adbc3-kube-api-access-vsh7f\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519085 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519109 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519209 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519230 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-logs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519252 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519284 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519307 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519391 4647 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 
15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.519404 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c49d4cb-49bc-40f4-b1e9-03c730876bdb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.625475 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.625525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626168 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626225 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-logs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626249 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626306 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626333 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.626403 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsh7f\" (UniqueName: \"kubernetes.io/projected/38dcdbad-1599-4387-8587-6676317adbc3-kube-api-access-vsh7f\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.631450 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") 
pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.631869 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.632238 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38dcdbad-1599-4387-8587-6676317adbc3-logs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.640103 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.640316 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.647459 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.652004 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38dcdbad-1599-4387-8587-6676317adbc3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.658964 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsh7f\" (UniqueName: \"kubernetes.io/projected/38dcdbad-1599-4387-8587-6676317adbc3-kube-api-access-vsh7f\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.714697 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"38dcdbad-1599-4387-8587-6676317adbc3\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.729801 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.824471 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerStarted","Data":"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e"} Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.825425 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6v9rt" event={"ID":"4148fe73-7269-47f4-9c12-60ec17192441","Type":"ContainerStarted","Data":"fe715d005adb5904a97b031e5f9f4debe63f791573d3743755514541df4386f3"} Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.829256 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7tcfv" event={"ID":"9c2c650d-ce2e-4087-bb19-f199ed233bcf","Type":"ContainerStarted","Data":"2b5f11fe8045ab260d9124996c5ed4b7d0b32596b742415b8798424ca800b7dd"} Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.830844 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bkg6q" event={"ID":"63e70ac4-fcb7-4bce-a0b2-5c148e71514f","Type":"ContainerStarted","Data":"8d1072024af1af1e0e53f199c794be6f137f33468056085c05a92d10471ffb39"} Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.832865 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.833561 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4c49d4cb-49bc-40f4-b1e9-03c730876bdb","Type":"ContainerDied","Data":"6d9c7eceac7cd47bc3542c38aa735b9f5ae0509f9e839cd0e6c07f4223957e72"} Nov 28 15:44:31 crc kubenswrapper[4647]: I1128 15:44:31.833607 4647 scope.go:117] "RemoveContainer" containerID="fb47b2636e4ccbf78e445df7f2965c002d36fc7be4b00ba47ce4f5787482f287" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.036585 4647 scope.go:117] "RemoveContainer" containerID="dfe5f51562f95457637fa84764fa1b2996ec3b1fca9e5ebbb693c1255ea1f996" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.036915 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.047233 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.059139 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.061095 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.070001 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.070273 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.136068 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140528 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140626 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140670 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljkrh\" (UniqueName: \"kubernetes.io/projected/cf5f0db0-688a-43f4-b38e-8478858003fa-kube-api-access-ljkrh\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140713 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140754 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140775 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140800 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-logs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.140818 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245097 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-logs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245405 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245509 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245597 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljkrh\" (UniqueName: \"kubernetes.io/projected/cf5f0db0-688a-43f4-b38e-8478858003fa-kube-api-access-ljkrh\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245689 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245769 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245838 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245987 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.246450 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.245776 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf5f0db0-688a-43f4-b38e-8478858003fa-logs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.254385 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.267995 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.271102 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.272249 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf5f0db0-688a-43f4-b38e-8478858003fa-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.276506 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljkrh\" (UniqueName: \"kubernetes.io/projected/cf5f0db0-688a-43f4-b38e-8478858003fa-kube-api-access-ljkrh\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.339118 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"cf5f0db0-688a-43f4-b38e-8478858003fa\") " pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.412697 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a15d5b4-a376-4f2e-bbf2-e04474864d2c" path="/var/lib/kubelet/pods/2a15d5b4-a376-4f2e-bbf2-e04474864d2c/volumes" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 
15:44:32.423566 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c49d4cb-49bc-40f4-b1e9-03c730876bdb" path="/var/lib/kubelet/pods/4c49d4cb-49bc-40f4-b1e9-03c730876bdb/volumes" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.442111 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.780142 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.880788 4647 generic.go:334] "Generic (PLEG): container finished" podID="9c2c650d-ce2e-4087-bb19-f199ed233bcf" containerID="75b9c0aba4dd0443d7b9692e3f7888f1b05a8e989ad81cb621c6e5dca0c83caf" exitCode=0 Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.880869 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7tcfv" event={"ID":"9c2c650d-ce2e-4087-bb19-f199ed233bcf","Type":"ContainerDied","Data":"75b9c0aba4dd0443d7b9692e3f7888f1b05a8e989ad81cb621c6e5dca0c83caf"} Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.889198 4647 generic.go:334] "Generic (PLEG): container finished" podID="63e70ac4-fcb7-4bce-a0b2-5c148e71514f" containerID="a62d7c5eea082182da67068ccddd5f0c14188f5c7d99fc1ff5be4a744c0d6dce" exitCode=0 Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.889327 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bkg6q" event={"ID":"63e70ac4-fcb7-4bce-a0b2-5c148e71514f","Type":"ContainerDied","Data":"a62d7c5eea082182da67068ccddd5f0c14188f5c7d99fc1ff5be4a744c0d6dce"} Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.896816 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38dcdbad-1599-4387-8587-6676317adbc3","Type":"ContainerStarted","Data":"e14613ee46369efd755c4dc91940e0491aca5f5eeaab60645bee0c1b8dfe1ed4"} Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.906531 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerStarted","Data":"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25"} Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.919634 4647 generic.go:334] "Generic (PLEG): container finished" podID="4148fe73-7269-47f4-9c12-60ec17192441" containerID="ffa42b1cd2f4d658abc916d4d2b3df0217254fc3484da9fbfe7abd0c5f337b64" exitCode=0 Nov 28 15:44:32 crc kubenswrapper[4647]: I1128 15:44:32.919696 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6v9rt" event={"ID":"4148fe73-7269-47f4-9c12-60ec17192441","Type":"ContainerDied","Data":"ffa42b1cd2f4d658abc916d4d2b3df0217254fc3484da9fbfe7abd0c5f337b64"} Nov 28 15:44:33 crc kubenswrapper[4647]: I1128 15:44:33.158826 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:44:33 crc kubenswrapper[4647]: W1128 15:44:33.169639 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf5f0db0_688a_43f4_b38e_8478858003fa.slice/crio-e2e0b3cb36e41c19d772d478cf6b79e9c1fdf6ebbe85503780a57fb95edb7ee9 WatchSource:0}: Error finding container e2e0b3cb36e41c19d772d478cf6b79e9c1fdf6ebbe85503780a57fb95edb7ee9: Status 404 returned error can't find the container with id 
e2e0b3cb36e41c19d772d478cf6b79e9c1fdf6ebbe85503780a57fb95edb7ee9 Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.014543 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerStarted","Data":"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f"} Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.019845 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38dcdbad-1599-4387-8587-6676317adbc3","Type":"ContainerStarted","Data":"d0023a0403c4dd9aecdf7dfa1c71b542f88fd9b71ccb32d1110dad7af967db6c"} Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.025704 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf5f0db0-688a-43f4-b38e-8478858003fa","Type":"ContainerStarted","Data":"e2e0b3cb36e41c19d772d478cf6b79e9c1fdf6ebbe85503780a57fb95edb7ee9"} Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.731152 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.740915 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.771074 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.839249 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66ghq\" (UniqueName: \"kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq\") pod \"4148fe73-7269-47f4-9c12-60ec17192441\" (UID: \"4148fe73-7269-47f4-9c12-60ec17192441\") " Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.839780 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ms65\" (UniqueName: \"kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65\") pod \"63e70ac4-fcb7-4bce-a0b2-5c148e71514f\" (UID: \"63e70ac4-fcb7-4bce-a0b2-5c148e71514f\") " Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.839871 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcssp\" (UniqueName: \"kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp\") pod \"9c2c650d-ce2e-4087-bb19-f199ed233bcf\" (UID: \"9c2c650d-ce2e-4087-bb19-f199ed233bcf\") " Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.843749 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65" (OuterVolumeSpecName: "kube-api-access-5ms65") pod "63e70ac4-fcb7-4bce-a0b2-5c148e71514f" (UID: "63e70ac4-fcb7-4bce-a0b2-5c148e71514f"). InnerVolumeSpecName "kube-api-access-5ms65". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.850126 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq" (OuterVolumeSpecName: "kube-api-access-66ghq") pod "4148fe73-7269-47f4-9c12-60ec17192441" (UID: "4148fe73-7269-47f4-9c12-60ec17192441"). InnerVolumeSpecName "kube-api-access-66ghq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.850210 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp" (OuterVolumeSpecName: "kube-api-access-mcssp") pod "9c2c650d-ce2e-4087-bb19-f199ed233bcf" (UID: "9c2c650d-ce2e-4087-bb19-f199ed233bcf"). InnerVolumeSpecName "kube-api-access-mcssp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.941966 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66ghq\" (UniqueName: \"kubernetes.io/projected/4148fe73-7269-47f4-9c12-60ec17192441-kube-api-access-66ghq\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.942175 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ms65\" (UniqueName: \"kubernetes.io/projected/63e70ac4-fcb7-4bce-a0b2-5c148e71514f-kube-api-access-5ms65\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:34 crc kubenswrapper[4647]: I1128 15:44:34.942237 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcssp\" (UniqueName: \"kubernetes.io/projected/9c2c650d-ce2e-4087-bb19-f199ed233bcf-kube-api-access-mcssp\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.082294 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-7tcfv" event={"ID":"9c2c650d-ce2e-4087-bb19-f199ed233bcf","Type":"ContainerDied","Data":"2b5f11fe8045ab260d9124996c5ed4b7d0b32596b742415b8798424ca800b7dd"} Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.082340 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b5f11fe8045ab260d9124996c5ed4b7d0b32596b742415b8798424ca800b7dd" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.082404 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-7tcfv" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.117218 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bkg6q" event={"ID":"63e70ac4-fcb7-4bce-a0b2-5c148e71514f","Type":"ContainerDied","Data":"8d1072024af1af1e0e53f199c794be6f137f33468056085c05a92d10471ffb39"} Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.117269 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d1072024af1af1e0e53f199c794be6f137f33468056085c05a92d10471ffb39" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.117352 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bkg6q" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.130992 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38dcdbad-1599-4387-8587-6676317adbc3","Type":"ContainerStarted","Data":"d81b35e00fa4c1a5e49070df6397b2e0848bd9200e6111ef95970183d4e9f00f"} Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.141057 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf5f0db0-688a-43f4-b38e-8478858003fa","Type":"ContainerStarted","Data":"5c4d64161fa39587a4325448d695069c0bab295ecce6d1f9b994b8e9bab56746"} Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.149875 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6v9rt" event={"ID":"4148fe73-7269-47f4-9c12-60ec17192441","Type":"ContainerDied","Data":"fe715d005adb5904a97b031e5f9f4debe63f791573d3743755514541df4386f3"} Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.149920 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe715d005adb5904a97b031e5f9f4debe63f791573d3743755514541df4386f3" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.149979 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6v9rt" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.427784 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:44:35 crc kubenswrapper[4647]: I1128 15:44:35.770567 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.770522553 podStartE2EDuration="4.770522553s" podCreationTimestamp="2025-11-28 15:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:35.18102091 +0000 UTC m=+1205.028627321" watchObservedRunningTime="2025-11-28 15:44:35.770522553 +0000 UTC m=+1205.618128984" Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.203350 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerStarted","Data":"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10"} Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.203539 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-central-agent" containerID="cri-o://198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e" gracePeriod=30 Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.203697 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-notification-agent" containerID="cri-o://8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25" gracePeriod=30 Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.203741 4647 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="sg-core" containerID="cri-o://bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f" gracePeriod=30 Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.203947 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="proxy-httpd" containerID="cri-o://0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10" gracePeriod=30 Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.206326 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:44:36 crc kubenswrapper[4647]: I1128 15:44:36.241994 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=12.398932447 podStartE2EDuration="17.241976521s" podCreationTimestamp="2025-11-28 15:44:19 +0000 UTC" firstStartedPulling="2025-11-28 15:44:30.093804898 +0000 UTC m=+1199.941411309" lastFinishedPulling="2025-11-28 15:44:34.936848962 +0000 UTC m=+1204.784455383" observedRunningTime="2025-11-28 15:44:36.237924243 +0000 UTC m=+1206.085530664" watchObservedRunningTime="2025-11-28 15:44:36.241976521 +0000 UTC m=+1206.089582942" Nov 28 15:44:37 crc kubenswrapper[4647]: I1128 15:44:37.216566 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf5f0db0-688a-43f4-b38e-8478858003fa","Type":"ContainerStarted","Data":"04b65a6fcd09144b7c85d938afb5dc752e3787229060ff423b2474e05120e278"} Nov 28 15:44:37 crc kubenswrapper[4647]: I1128 15:44:37.221110 4647 generic.go:334] "Generic (PLEG): container finished" podID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerID="0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10" exitCode=0 Nov 28 15:44:37 crc kubenswrapper[4647]: I1128 15:44:37.221394 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerDied","Data":"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10"} Nov 28 15:44:38 crc kubenswrapper[4647]: I1128 15:44:38.238634 4647 generic.go:334] "Generic (PLEG): container finished" podID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerID="bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f" exitCode=2 Nov 28 15:44:38 crc kubenswrapper[4647]: I1128 15:44:38.240278 4647 generic.go:334] "Generic (PLEG): container finished" podID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerID="8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25" exitCode=0 Nov 28 15:44:38 crc kubenswrapper[4647]: I1128 15:44:38.238713 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerDied","Data":"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f"} Nov 28 15:44:38 crc kubenswrapper[4647]: I1128 15:44:38.240512 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerDied","Data":"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25"} Nov 28 15:44:38 crc kubenswrapper[4647]: I1128 15:44:38.285113 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.285076334 podStartE2EDuration="6.285076334s" 
podCreationTimestamp="2025-11-28 15:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:38.266566974 +0000 UTC m=+1208.114173405" watchObservedRunningTime="2025-11-28 15:44:38.285076334 +0000 UTC m=+1208.132682755" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.019521 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166482 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166582 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166644 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166700 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166846 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96rt8\" (UniqueName: \"kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166866 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.166911 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd\") pod \"19aa14a1-403e-4292-9136-6b6c514ba46c\" (UID: \"19aa14a1-403e-4292-9136-6b6c514ba46c\") " Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.168301 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.169338 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.182595 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts" (OuterVolumeSpecName: "scripts") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.183867 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8" (OuterVolumeSpecName: "kube-api-access-96rt8") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "kube-api-access-96rt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.265601 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.269128 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96rt8\" (UniqueName: \"kubernetes.io/projected/19aa14a1-403e-4292-9136-6b6c514ba46c-kube-api-access-96rt8\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.269167 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.269178 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19aa14a1-403e-4292-9136-6b6c514ba46c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.269188 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.269198 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.271027 4647 generic.go:334] "Generic (PLEG): container finished" podID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerID="198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e" exitCode=0 Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.271132 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerDied","Data":"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e"} Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.271168 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19aa14a1-403e-4292-9136-6b6c514ba46c","Type":"ContainerDied","Data":"cea1d03a5a62a9710b50eb9c1a42d744de90f3ac6b54b40c8b71dc6a37bad7d0"} Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.271187 4647 scope.go:117] "RemoveContainer" containerID="0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.271323 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.295586 4647 scope.go:117] "RemoveContainer" containerID="bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.319194 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.320152 4647 scope.go:117] "RemoveContainer" containerID="8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.341970 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data" (OuterVolumeSpecName: "config-data") pod "19aa14a1-403e-4292-9136-6b6c514ba46c" (UID: "19aa14a1-403e-4292-9136-6b6c514ba46c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.356708 4647 scope.go:117] "RemoveContainer" containerID="198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.371124 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.371167 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19aa14a1-403e-4292-9136-6b6c514ba46c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.385126 4647 scope.go:117] "RemoveContainer" containerID="0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.385742 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10\": container with ID starting with 0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10 not found: ID does not exist" containerID="0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.385779 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10"} err="failed to get container status \"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10\": rpc error: code = NotFound desc = could not find container \"0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10\": container with ID starting with 0a495a3e6f53f86e10a9a3f202d2381547bd5df37508beb6b9cc0f1b9882de10 not found: ID does not exist" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.385801 4647 scope.go:117] "RemoveContainer" containerID="bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.386238 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f\": container with ID starting with bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f not found: ID does not exist" containerID="bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.386261 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f"} err="failed to get container status \"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f\": rpc error: code = NotFound desc = could not find container \"bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f\": container with ID starting with bbae78135058df7f62f98ace2ab53b597d7e82246391c697005fb6f97b002a7f not found: ID does not exist" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.386275 4647 scope.go:117] "RemoveContainer" containerID="8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.386544 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25\": container with ID starting with 8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25 not found: ID does not exist" containerID="8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.386564 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25"} err="failed to get container status \"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25\": rpc error: code = NotFound desc = could not find container \"8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25\": container with ID starting with 8564fd8347e08bb576e7319f84f871c7ccd59b686917f829fd8470fd503d1e25 not found: ID does not exist" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.386577 4647 scope.go:117] "RemoveContainer" containerID="198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.386942 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e\": container with ID starting with 198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e not found: ID does not exist" containerID="198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.387007 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e"} err="failed to get container status \"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e\": rpc error: code = NotFound desc = could not find container \"198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e\": container with ID starting with 198eebe89af70f2b9086d0b2ad90056910cca8ca234d91edaf82bced148b3d2e not found: ID does not exist" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.611919 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.627226 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.658639 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659138 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4148fe73-7269-47f4-9c12-60ec17192441" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659160 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4148fe73-7269-47f4-9c12-60ec17192441" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659178 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c2c650d-ce2e-4087-bb19-f199ed233bcf" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659186 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c2c650d-ce2e-4087-bb19-f199ed233bcf" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659201 4647 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="proxy-httpd" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659211 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="proxy-httpd" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659231 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-notification-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659238 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-notification-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659250 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="sg-core" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659256 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="sg-core" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659269 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e70ac4-fcb7-4bce-a0b2-5c148e71514f" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659276 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e70ac4-fcb7-4bce-a0b2-5c148e71514f" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: E1128 15:44:40.659291 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-central-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659298 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-central-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659514 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="sg-core" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659532 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c2c650d-ce2e-4087-bb19-f199ed233bcf" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659539 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="proxy-httpd" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659547 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e70ac4-fcb7-4bce-a0b2-5c148e71514f" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659564 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-notification-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659573 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" containerName="ceilometer-central-agent" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.659587 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4148fe73-7269-47f4-9c12-60ec17192441" containerName="mariadb-database-create" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.662798 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.667342 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.684090 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.731312 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783123 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783223 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783268 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783316 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783354 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxxq5\" (UniqueName: \"kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783390 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.783432 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885072 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 
15:44:40.885163 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxxq5\" (UniqueName: \"kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885207 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885230 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885295 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885342 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.885388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.886173 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.886390 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.891656 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.892265 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.892939 4647 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.904024 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.913192 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxxq5\" (UniqueName: \"kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5\") pod \"ceilometer-0\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " pod="openstack/ceilometer-0" Nov 28 15:44:40 crc kubenswrapper[4647]: I1128 15:44:40.994267 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:41 crc kubenswrapper[4647]: I1128 15:44:41.557850 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:41 crc kubenswrapper[4647]: I1128 15:44:41.730432 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:41 crc kubenswrapper[4647]: I1128 15:44:41.730529 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:41 crc kubenswrapper[4647]: I1128 15:44:41.777709 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:41 crc kubenswrapper[4647]: I1128 15:44:41.787701 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.121054 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.370615 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerStarted","Data":"df21d48e768c0d209f8fc9697abec7cef05fd83eaa8084ce2288ea034d9653cf"} Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.371182 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.371204 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.416372 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19aa14a1-403e-4292-9136-6b6c514ba46c" path="/var/lib/kubelet/pods/19aa14a1-403e-4292-9136-6b6c514ba46c/volumes" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.442495 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.442559 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.498930 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/glance-default-external-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.499391 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.784318 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-662e-account-create-rqmwd"] Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.786542 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.789810 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.814525 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-662e-account-create-rqmwd"] Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.857678 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxjnq\" (UniqueName: \"kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq\") pod \"nova-api-662e-account-create-rqmwd\" (UID: \"d6933167-f4cc-48bf-a880-f05c6f1dee6e\") " pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.959628 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxjnq\" (UniqueName: \"kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq\") pod \"nova-api-662e-account-create-rqmwd\" (UID: \"d6933167-f4cc-48bf-a880-f05c6f1dee6e\") " pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.979078 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7291-account-create-7r4r7"] Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.980644 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.988301 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.994753 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7291-account-create-7r4r7"] Nov 28 15:44:42 crc kubenswrapper[4647]: I1128 15:44:42.998678 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxjnq\" (UniqueName: \"kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq\") pod \"nova-api-662e-account-create-rqmwd\" (UID: \"d6933167-f4cc-48bf-a880-f05c6f1dee6e\") " pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.062936 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6swj\" (UniqueName: \"kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj\") pod \"nova-cell0-7291-account-create-7r4r7\" (UID: \"691a522b-98e3-4e61-85e1-5f66167bd77e\") " pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.107136 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.168786 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6swj\" (UniqueName: \"kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj\") pod \"nova-cell0-7291-account-create-7r4r7\" (UID: \"691a522b-98e3-4e61-85e1-5f66167bd77e\") " pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.218461 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-f1f8-account-create-cxfkp"] Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.219757 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.226714 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.257513 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f1f8-account-create-cxfkp"] Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.287248 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6swj\" (UniqueName: \"kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj\") pod \"nova-cell0-7291-account-create-7r4r7\" (UID: \"691a522b-98e3-4e61-85e1-5f66167bd77e\") " pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.308035 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.373225 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zn9g9\" (UniqueName: \"kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9\") pod \"nova-cell1-f1f8-account-create-cxfkp\" (UID: \"a5b9747f-3a90-42ca-885e-119cdcb53280\") " pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.387505 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerStarted","Data":"ddbe8ceba0322cdf118fba41510f02aaedbf84e0902f65aa205968ecc6116276"} Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.388495 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.388519 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.475429 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zn9g9\" (UniqueName: \"kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9\") pod \"nova-cell1-f1f8-account-create-cxfkp\" (UID: \"a5b9747f-3a90-42ca-885e-119cdcb53280\") " pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.504339 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zn9g9\" (UniqueName: 
\"kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9\") pod \"nova-cell1-f1f8-account-create-cxfkp\" (UID: \"a5b9747f-3a90-42ca-885e-119cdcb53280\") " pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.693004 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.821298 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7291-account-create-7r4r7"] Nov 28 15:44:43 crc kubenswrapper[4647]: I1128 15:44:43.936887 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-662e-account-create-rqmwd"] Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.303079 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-f1f8-account-create-cxfkp"] Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451387 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-7291-account-create-7r4r7" podStartSLOduration=2.451347276 podStartE2EDuration="2.451347276s" podCreationTimestamp="2025-11-28 15:44:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:44.428687246 +0000 UTC m=+1214.276293657" watchObservedRunningTime="2025-11-28 15:44:44.451347276 +0000 UTC m=+1214.298953697" Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451481 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerStarted","Data":"fb9ec20e75609a9f0ac33cac0b56bdb0ca8fe557a4aa496499a9a6dfa9d4e4fb"} Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451528 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7291-account-create-7r4r7" event={"ID":"691a522b-98e3-4e61-85e1-5f66167bd77e","Type":"ContainerStarted","Data":"8014c989c1857fa51b69435591fd0d2840fd173c803ad8365d77085a9f9975d3"} Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451541 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7291-account-create-7r4r7" event={"ID":"691a522b-98e3-4e61-85e1-5f66167bd77e","Type":"ContainerStarted","Data":"575c14c1da8f90ef0b8b018d5ad681019a13ee0c8fb4b1d5fb62bdf05450c100"} Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451552 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" event={"ID":"a5b9747f-3a90-42ca-885e-119cdcb53280","Type":"ContainerStarted","Data":"7e56663189bede3b405581a5c45951f494b5bc3610c60a367f78b744977ddd10"} Nov 28 15:44:44 crc kubenswrapper[4647]: I1128 15:44:44.451563 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-662e-account-create-rqmwd" event={"ID":"d6933167-f4cc-48bf-a880-f05c6f1dee6e","Type":"ContainerStarted","Data":"83f0cff2ae34634dc7ae02f1362fbbf7e9ed0c0db6c3ac8e52c086c42cadd795"} Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.426221 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 
15:44:45.427239 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.428595 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"3c18b91e7b1439f8227b948f2281e3ee534d373728f5845fa589469f1c989899"} pod="openstack/horizon-66c6c46cdb-xgv7h" containerMessage="Container horizon failed startup probe, will be restarted" Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.428644 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" containerID="cri-o://3c18b91e7b1439f8227b948f2281e3ee534d373728f5845fa589469f1c989899" gracePeriod=30 Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.444981 4647 generic.go:334] "Generic (PLEG): container finished" podID="691a522b-98e3-4e61-85e1-5f66167bd77e" containerID="8014c989c1857fa51b69435591fd0d2840fd173c803ad8365d77085a9f9975d3" exitCode=0 Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.445389 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7291-account-create-7r4r7" event={"ID":"691a522b-98e3-4e61-85e1-5f66167bd77e","Type":"ContainerDied","Data":"8014c989c1857fa51b69435591fd0d2840fd173c803ad8365d77085a9f9975d3"} Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.450406 4647 generic.go:334] "Generic (PLEG): container finished" podID="a5b9747f-3a90-42ca-885e-119cdcb53280" containerID="d0abd98ff788e15b9b1243ec0a4ade0ffe868ae935541fe896a4121588b7b697" exitCode=0 Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.450494 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" event={"ID":"a5b9747f-3a90-42ca-885e-119cdcb53280","Type":"ContainerDied","Data":"d0abd98ff788e15b9b1243ec0a4ade0ffe868ae935541fe896a4121588b7b697"} Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.452902 4647 generic.go:334] "Generic (PLEG): container finished" podID="d6933167-f4cc-48bf-a880-f05c6f1dee6e" containerID="79c16e47fd6e3bd5826af7ddd6f8b6f9693e3583e2e655006211cb03dc7a2ecc" exitCode=0 Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.452966 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-662e-account-create-rqmwd" event={"ID":"d6933167-f4cc-48bf-a880-f05c6f1dee6e","Type":"ContainerDied","Data":"79c16e47fd6e3bd5826af7ddd6f8b6f9693e3583e2e655006211cb03dc7a2ecc"} Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.456322 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.456345 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:44:45 crc kubenswrapper[4647]: I1128 15:44:45.456306 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerStarted","Data":"a1e92f8783e35cd9d2119bb958f40a77a9680d348d0e20a52f200494d5ce9aa4"} Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.093052 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.197792 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6swj\" (UniqueName: \"kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj\") pod \"691a522b-98e3-4e61-85e1-5f66167bd77e\" (UID: \"691a522b-98e3-4e61-85e1-5f66167bd77e\") " Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.206904 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.222897 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.226819 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj" (OuterVolumeSpecName: "kube-api-access-w6swj") pod "691a522b-98e3-4e61-85e1-5f66167bd77e" (UID: "691a522b-98e3-4e61-85e1-5f66167bd77e"). InnerVolumeSpecName "kube-api-access-w6swj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.300681 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxjnq\" (UniqueName: \"kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq\") pod \"d6933167-f4cc-48bf-a880-f05c6f1dee6e\" (UID: \"d6933167-f4cc-48bf-a880-f05c6f1dee6e\") " Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.300752 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zn9g9\" (UniqueName: \"kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9\") pod \"a5b9747f-3a90-42ca-885e-119cdcb53280\" (UID: \"a5b9747f-3a90-42ca-885e-119cdcb53280\") " Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.301331 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6swj\" (UniqueName: \"kubernetes.io/projected/691a522b-98e3-4e61-85e1-5f66167bd77e-kube-api-access-w6swj\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.311771 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq" (OuterVolumeSpecName: "kube-api-access-nxjnq") pod "d6933167-f4cc-48bf-a880-f05c6f1dee6e" (UID: "d6933167-f4cc-48bf-a880-f05c6f1dee6e"). InnerVolumeSpecName "kube-api-access-nxjnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.325900 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9" (OuterVolumeSpecName: "kube-api-access-zn9g9") pod "a5b9747f-3a90-42ca-885e-119cdcb53280" (UID: "a5b9747f-3a90-42ca-885e-119cdcb53280"). InnerVolumeSpecName "kube-api-access-zn9g9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.404737 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxjnq\" (UniqueName: \"kubernetes.io/projected/d6933167-f4cc-48bf-a880-f05c6f1dee6e-kube-api-access-nxjnq\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.404780 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zn9g9\" (UniqueName: \"kubernetes.io/projected/a5b9747f-3a90-42ca-885e-119cdcb53280-kube-api-access-zn9g9\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.483523 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" event={"ID":"a5b9747f-3a90-42ca-885e-119cdcb53280","Type":"ContainerDied","Data":"7e56663189bede3b405581a5c45951f494b5bc3610c60a367f78b744977ddd10"} Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.483578 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e56663189bede3b405581a5c45951f494b5bc3610c60a367f78b744977ddd10" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.483607 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-f1f8-account-create-cxfkp" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.484719 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-662e-account-create-rqmwd" event={"ID":"d6933167-f4cc-48bf-a880-f05c6f1dee6e","Type":"ContainerDied","Data":"83f0cff2ae34634dc7ae02f1362fbbf7e9ed0c0db6c3ac8e52c086c42cadd795"} Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.484744 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83f0cff2ae34634dc7ae02f1362fbbf7e9ed0c0db6c3ac8e52c086c42cadd795" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.484812 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-662e-account-create-rqmwd" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.498229 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerStarted","Data":"3c1ca3d194661628221441df83dd25c2f195cc24a8eadb07da4b609a9d2f78eb"} Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.498447 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-central-agent" containerID="cri-o://ddbe8ceba0322cdf118fba41510f02aaedbf84e0902f65aa205968ecc6116276" gracePeriod=30 Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.498696 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.498981 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="proxy-httpd" containerID="cri-o://3c1ca3d194661628221441df83dd25c2f195cc24a8eadb07da4b609a9d2f78eb" gracePeriod=30 Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.499036 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="sg-core" containerID="cri-o://a1e92f8783e35cd9d2119bb958f40a77a9680d348d0e20a52f200494d5ce9aa4" gracePeriod=30 Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.499074 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-notification-agent" containerID="cri-o://fb9ec20e75609a9f0ac33cac0b56bdb0ca8fe557a4aa496499a9a6dfa9d4e4fb" gracePeriod=30 Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.508633 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7291-account-create-7r4r7" event={"ID":"691a522b-98e3-4e61-85e1-5f66167bd77e","Type":"ContainerDied","Data":"575c14c1da8f90ef0b8b018d5ad681019a13ee0c8fb4b1d5fb62bdf05450c100"} Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.508663 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="575c14c1da8f90ef0b8b018d5ad681019a13ee0c8fb4b1d5fb62bdf05450c100" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.508703 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7291-account-create-7r4r7" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.524137 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.310418911 podStartE2EDuration="7.524118895s" podCreationTimestamp="2025-11-28 15:44:40 +0000 UTC" firstStartedPulling="2025-11-28 15:44:41.569915617 +0000 UTC m=+1211.417522038" lastFinishedPulling="2025-11-28 15:44:46.783615601 +0000 UTC m=+1216.631222022" observedRunningTime="2025-11-28 15:44:47.52016651 +0000 UTC m=+1217.367772931" watchObservedRunningTime="2025-11-28 15:44:47.524118895 +0000 UTC m=+1217.371725306" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.624310 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.624795 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.753150 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.753263 4647 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:44:47 crc kubenswrapper[4647]: I1128 15:44:47.766689 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.272863 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-78mbd"] Nov 28 15:44:48 crc kubenswrapper[4647]: E1128 15:44:48.273302 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="691a522b-98e3-4e61-85e1-5f66167bd77e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273323 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="691a522b-98e3-4e61-85e1-5f66167bd77e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: E1128 15:44:48.273338 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b9747f-3a90-42ca-885e-119cdcb53280" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273343 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b9747f-3a90-42ca-885e-119cdcb53280" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: E1128 15:44:48.273359 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6933167-f4cc-48bf-a880-f05c6f1dee6e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273365 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6933167-f4cc-48bf-a880-f05c6f1dee6e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273572 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="691a522b-98e3-4e61-85e1-5f66167bd77e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273598 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6933167-f4cc-48bf-a880-f05c6f1dee6e" containerName="mariadb-account-create" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.273610 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b9747f-3a90-42ca-885e-119cdcb53280" containerName="mariadb-account-create" Nov 
28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.274316 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.276843 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.277073 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.277198 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8lpb6" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.306071 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-78mbd"] Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.429959 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvh86\" (UniqueName: \"kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.430092 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.430133 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.430172 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.527214 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerID="a1e92f8783e35cd9d2119bb958f40a77a9680d348d0e20a52f200494d5ce9aa4" exitCode=2 Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.527449 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerDied","Data":"a1e92f8783e35cd9d2119bb958f40a77a9680d348d0e20a52f200494d5ce9aa4"} Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.531708 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvh86\" (UniqueName: \"kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc 
kubenswrapper[4647]: I1128 15:44:48.531865 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.531900 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.531953 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.543344 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.553037 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.558126 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.572877 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvh86\" (UniqueName: \"kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86\") pod \"nova-cell0-conductor-db-sync-78mbd\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.594903 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:44:48 crc kubenswrapper[4647]: I1128 15:44:48.617034 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:44:49 crc kubenswrapper[4647]: I1128 15:44:49.541843 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerID="fb9ec20e75609a9f0ac33cac0b56bdb0ca8fe557a4aa496499a9a6dfa9d4e4fb" exitCode=0 Nov 28 15:44:49 crc kubenswrapper[4647]: I1128 15:44:49.541917 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerDied","Data":"fb9ec20e75609a9f0ac33cac0b56bdb0ca8fe557a4aa496499a9a6dfa9d4e4fb"} Nov 28 15:44:49 crc kubenswrapper[4647]: I1128 15:44:49.565776 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-78mbd"] Nov 28 15:44:49 crc kubenswrapper[4647]: W1128 15:44:49.571692 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod575d03bd_0f8a_4f54_b6cb_2fdd4b48365b.slice/crio-6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464 WatchSource:0}: Error finding container 6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464: Status 404 returned error can't find the container with id 6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464 Nov 28 15:44:50 crc kubenswrapper[4647]: I1128 15:44:50.092713 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-rgvlr" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" probeResult="failure" output=< Nov 28 15:44:50 crc kubenswrapper[4647]: timeout: health rpc did not complete within 1s Nov 28 15:44:50 crc kubenswrapper[4647]: > Nov 28 15:44:50 crc kubenswrapper[4647]: I1128 15:44:50.105934 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-rgvlr" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" probeResult="failure" output=< Nov 28 15:44:50 crc kubenswrapper[4647]: timeout: health rpc did not complete within 1s Nov 28 15:44:50 crc kubenswrapper[4647]: > Nov 28 15:44:50 crc kubenswrapper[4647]: I1128 15:44:50.554728 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-78mbd" event={"ID":"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b","Type":"ContainerStarted","Data":"6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464"} Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.313490 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2"] Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.316904 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.320306 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.320644 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.352987 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2"] Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.401601 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.401978 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.402049 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzgwc\" (UniqueName: \"kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.503813 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.503894 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.503968 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzgwc\" (UniqueName: \"kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.505054 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume\") pod 
\"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.536603 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzgwc\" (UniqueName: \"kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.538244 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume\") pod \"collect-profiles-29405745-xjhw2\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:00 crc kubenswrapper[4647]: I1128 15:45:00.671705 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:01 crc kubenswrapper[4647]: I1128 15:45:01.716152 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerID="ddbe8ceba0322cdf118fba41510f02aaedbf84e0902f65aa205968ecc6116276" exitCode=0 Nov 28 15:45:01 crc kubenswrapper[4647]: I1128 15:45:01.716314 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerDied","Data":"ddbe8ceba0322cdf118fba41510f02aaedbf84e0902f65aa205968ecc6116276"} Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.493211 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2"] Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.730144 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-78mbd" event={"ID":"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b","Type":"ContainerStarted","Data":"6aa6bd29accae665032efa8886f0ec5db3dad456a5157940edb15e3ba07f0b6f"} Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.732356 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" event={"ID":"6559e55c-8d5f-4838-a3d6-585ce3815b9e","Type":"ContainerStarted","Data":"89159b9e18a6b502faf68ae1a1108a56f4b6fe58855e9cf32714323932c4cefa"} Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.732395 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" event={"ID":"6559e55c-8d5f-4838-a3d6-585ce3815b9e","Type":"ContainerStarted","Data":"56ee724dd5a13ce203ddb9da7897534299b63c375be094b269eeee909a516d57"} Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.746227 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-78mbd" podStartSLOduration=2.263517802 podStartE2EDuration="14.746208351s" podCreationTimestamp="2025-11-28 15:44:48 +0000 UTC" firstStartedPulling="2025-11-28 15:44:49.574201894 +0000 UTC m=+1219.421808315" lastFinishedPulling="2025-11-28 15:45:02.056892443 +0000 UTC m=+1231.904498864" observedRunningTime="2025-11-28 15:45:02.745725998 +0000 UTC m=+1232.593332419" watchObservedRunningTime="2025-11-28 
15:45:02.746208351 +0000 UTC m=+1232.593814772" Nov 28 15:45:02 crc kubenswrapper[4647]: I1128 15:45:02.767052 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" podStartSLOduration=2.767035163 podStartE2EDuration="2.767035163s" podCreationTimestamp="2025-11-28 15:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:02.761108866 +0000 UTC m=+1232.608715287" watchObservedRunningTime="2025-11-28 15:45:02.767035163 +0000 UTC m=+1232.614641584" Nov 28 15:45:03 crc kubenswrapper[4647]: I1128 15:45:03.742556 4647 generic.go:334] "Generic (PLEG): container finished" podID="6559e55c-8d5f-4838-a3d6-585ce3815b9e" containerID="89159b9e18a6b502faf68ae1a1108a56f4b6fe58855e9cf32714323932c4cefa" exitCode=0 Nov 28 15:45:03 crc kubenswrapper[4647]: I1128 15:45:03.742593 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" event={"ID":"6559e55c-8d5f-4838-a3d6-585ce3815b9e","Type":"ContainerDied","Data":"89159b9e18a6b502faf68ae1a1108a56f4b6fe58855e9cf32714323932c4cefa"} Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.110580 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.200219 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume\") pod \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.200285 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzgwc\" (UniqueName: \"kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc\") pod \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.200522 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume\") pod \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\" (UID: \"6559e55c-8d5f-4838-a3d6-585ce3815b9e\") " Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.201518 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume" (OuterVolumeSpecName: "config-volume") pod "6559e55c-8d5f-4838-a3d6-585ce3815b9e" (UID: "6559e55c-8d5f-4838-a3d6-585ce3815b9e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.207231 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc" (OuterVolumeSpecName: "kube-api-access-gzgwc") pod "6559e55c-8d5f-4838-a3d6-585ce3815b9e" (UID: "6559e55c-8d5f-4838-a3d6-585ce3815b9e"). InnerVolumeSpecName "kube-api-access-gzgwc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.213056 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6559e55c-8d5f-4838-a3d6-585ce3815b9e" (UID: "6559e55c-8d5f-4838-a3d6-585ce3815b9e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.302650 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6559e55c-8d5f-4838-a3d6-585ce3815b9e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.302684 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6559e55c-8d5f-4838-a3d6-585ce3815b9e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.302694 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzgwc\" (UniqueName: \"kubernetes.io/projected/6559e55c-8d5f-4838-a3d6-585ce3815b9e-kube-api-access-gzgwc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.770456 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" event={"ID":"6559e55c-8d5f-4838-a3d6-585ce3815b9e","Type":"ContainerDied","Data":"56ee724dd5a13ce203ddb9da7897534299b63c375be094b269eeee909a516d57"} Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.770503 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56ee724dd5a13ce203ddb9da7897534299b63c375be094b269eeee909a516d57" Nov 28 15:45:05 crc kubenswrapper[4647]: I1128 15:45:05.770557 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2" Nov 28 15:45:13 crc kubenswrapper[4647]: I1128 15:45:13.828659 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-f8648f98b-xqlc5" podUID="c4214344-1c2e-48f0-a1cb-c0a0414c8e77" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.48:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:45:14 crc kubenswrapper[4647]: I1128 15:45:14.316649 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 15:45:15 crc kubenswrapper[4647]: I1128 15:45:15.879200 4647 generic.go:334] "Generic (PLEG): container finished" podID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerID="3c18b91e7b1439f8227b948f2281e3ee534d373728f5845fa589469f1c989899" exitCode=137 Nov 28 15:45:15 crc kubenswrapper[4647]: I1128 15:45:15.879993 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerDied","Data":"3c18b91e7b1439f8227b948f2281e3ee534d373728f5845fa589469f1c989899"} Nov 28 15:45:15 crc kubenswrapper[4647]: I1128 15:45:15.880039 4647 scope.go:117] "RemoveContainer" containerID="420ea4980bc1bd80fd5479d418826195243c81ba3ab1d29a3b1bf8f7eb2fbb66" Nov 28 15:45:16 crc kubenswrapper[4647]: I1128 15:45:16.899275 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66c6c46cdb-xgv7h" event={"ID":"278aef39-0aaf-4d33-b167-0f0cca8248fd","Type":"ContainerStarted","Data":"30ed47beb8accdff9bc7ea28f2d962a4e31e3f85356b41ca918bcff22b41f706"} Nov 28 15:45:17 crc kubenswrapper[4647]: I1128 15:45:17.023273 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:45:17 crc kubenswrapper[4647]: I1128 15:45:17.023334 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:45:18 crc kubenswrapper[4647]: I1128 15:45:18.926619 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerID="3c1ca3d194661628221441df83dd25c2f195cc24a8eadb07da4b609a9d2f78eb" exitCode=137 Nov 28 15:45:18 crc kubenswrapper[4647]: I1128 15:45:18.926681 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerDied","Data":"3c1ca3d194661628221441df83dd25c2f195cc24a8eadb07da4b609a9d2f78eb"} Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.331431 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.338197 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.338272 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.338313 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.338378 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.338944 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.339492 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.339588 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxxq5\" (UniqueName: \"kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.340375 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.340684 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts\") pod \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\" (UID: \"8bdf2ec5-b79f-4861-8b39-9ac0d321f922\") " Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.341379 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.341453 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.346709 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5" (OuterVolumeSpecName: "kube-api-access-jxxq5") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "kube-api-access-jxxq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.349885 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts" (OuterVolumeSpecName: "scripts") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.425019 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.445399 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.445458 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.445473 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxxq5\" (UniqueName: \"kubernetes.io/projected/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-kube-api-access-jxxq5\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.488222 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.516442 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data" (OuterVolumeSpecName: "config-data") pod "8bdf2ec5-b79f-4861-8b39-9ac0d321f922" (UID: "8bdf2ec5-b79f-4861-8b39-9ac0d321f922"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.547104 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.547136 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdf2ec5-b79f-4861-8b39-9ac0d321f922-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.941194 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdf2ec5-b79f-4861-8b39-9ac0d321f922","Type":"ContainerDied","Data":"df21d48e768c0d209f8fc9697abec7cef05fd83eaa8084ce2288ea034d9653cf"} Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.941632 4647 scope.go:117] "RemoveContainer" containerID="3c1ca3d194661628221441df83dd25c2f195cc24a8eadb07da4b609a9d2f78eb" Nov 28 15:45:19 crc kubenswrapper[4647]: I1128 15:45:19.941796 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.003537 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.011844 4647 scope.go:117] "RemoveContainer" containerID="a1e92f8783e35cd9d2119bb958f40a77a9680d348d0e20a52f200494d5ce9aa4" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.015840 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.032565 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:20 crc kubenswrapper[4647]: E1128 15:45:20.033341 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6559e55c-8d5f-4838-a3d6-585ce3815b9e" containerName="collect-profiles" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.033426 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6559e55c-8d5f-4838-a3d6-585ce3815b9e" containerName="collect-profiles" Nov 28 15:45:20 crc kubenswrapper[4647]: E1128 15:45:20.033522 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-notification-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.033574 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-notification-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: E1128 15:45:20.033629 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="sg-core" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.033716 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="sg-core" Nov 28 15:45:20 crc kubenswrapper[4647]: E1128 15:45:20.033783 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-central-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.033842 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-central-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: E1128 15:45:20.033904 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="proxy-httpd" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.033955 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="proxy-httpd" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.034210 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="proxy-httpd" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.034282 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="sg-core" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.034340 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6559e55c-8d5f-4838-a3d6-585ce3815b9e" containerName="collect-profiles" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.034394 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-notification-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 
15:45:20.034476 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" containerName="ceilometer-central-agent" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.039852 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.044871 4647 scope.go:117] "RemoveContainer" containerID="fb9ec20e75609a9f0ac33cac0b56bdb0ca8fe557a4aa496499a9a6dfa9d4e4fb" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.051975 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.054649 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.054838 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057108 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057173 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8krv\" (UniqueName: \"kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057217 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057303 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057326 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057353 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.057386 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.101058 4647 scope.go:117] "RemoveContainer" containerID="ddbe8ceba0322cdf118fba41510f02aaedbf84e0902f65aa205968ecc6116276" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159608 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159684 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159746 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159770 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159805 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159907 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.159939 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8krv\" (UniqueName: \"kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.160475 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.160939 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.165579 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.165889 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.165920 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.166217 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.178108 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8krv\" (UniqueName: \"kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv\") pod \"ceilometer-0\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.860737 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4647]: I1128 15:45:20.886773 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bdf2ec5-b79f-4861-8b39-9ac0d321f922" path="/var/lib/kubelet/pods/8bdf2ec5-b79f-4861-8b39-9ac0d321f922/volumes" Nov 28 15:45:21 crc kubenswrapper[4647]: I1128 15:45:21.437981 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:21 crc kubenswrapper[4647]: W1128 15:45:21.439765 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode54a4531_a1ff_465a_9203_13881d25fc60.slice/crio-920bc0fdb164ff9b05c04a81a91481190e58a75e29e34e1dff9a059cab796c71 WatchSource:0}: Error finding container 920bc0fdb164ff9b05c04a81a91481190e58a75e29e34e1dff9a059cab796c71: Status 404 returned error can't find the container with id 920bc0fdb164ff9b05c04a81a91481190e58a75e29e34e1dff9a059cab796c71 Nov 28 15:45:21 crc kubenswrapper[4647]: I1128 15:45:21.446250 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:45:21 crc kubenswrapper[4647]: I1128 15:45:21.971970 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerStarted","Data":"920bc0fdb164ff9b05c04a81a91481190e58a75e29e34e1dff9a059cab796c71"} Nov 28 15:45:22 crc kubenswrapper[4647]: I1128 15:45:22.988542 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerStarted","Data":"4721f4947ae95035b2353defffa22fc8bb998f65da646b5f23b17c56d7dcf58a"} Nov 28 15:45:24 crc kubenswrapper[4647]: I1128 15:45:24.005939 4647 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerStarted","Data":"6c616f699aed738bf9ba151465c7323e5b487e2060f034b4784719565dbc306e"} Nov 28 15:45:25 crc kubenswrapper[4647]: I1128 15:45:25.426030 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:45:25 crc kubenswrapper[4647]: I1128 15:45:25.427058 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:45:25 crc kubenswrapper[4647]: I1128 15:45:25.426988 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66c6c46cdb-xgv7h" podUID="278aef39-0aaf-4d33-b167-0f0cca8248fd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 28 15:45:26 crc kubenswrapper[4647]: I1128 15:45:26.045843 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerStarted","Data":"7dfd6f011a97157423aca7d59f81386cfce513967871c6bf6b41873742fd08a8"} Nov 28 15:45:28 crc kubenswrapper[4647]: I1128 15:45:28.072969 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerStarted","Data":"40116c0ed3cfc617663a083ac39b1e67782f7b53fea8398d0c1ae54dc22d2830"} Nov 28 15:45:28 crc kubenswrapper[4647]: I1128 15:45:28.075558 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:45:28 crc kubenswrapper[4647]: I1128 15:45:28.150481 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.262070064 podStartE2EDuration="9.145595647s" podCreationTimestamp="2025-11-28 15:45:19 +0000 UTC" firstStartedPulling="2025-11-28 15:45:21.44565569 +0000 UTC m=+1251.293262121" lastFinishedPulling="2025-11-28 15:45:27.329181243 +0000 UTC m=+1257.176787704" observedRunningTime="2025-11-28 15:45:28.133527158 +0000 UTC m=+1257.981133579" watchObservedRunningTime="2025-11-28 15:45:28.145595647 +0000 UTC m=+1257.993202108" Nov 28 15:45:32 crc kubenswrapper[4647]: I1128 15:45:32.120556 4647 generic.go:334] "Generic (PLEG): container finished" podID="575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" containerID="6aa6bd29accae665032efa8886f0ec5db3dad456a5157940edb15e3ba07f0b6f" exitCode=0 Nov 28 15:45:32 crc kubenswrapper[4647]: I1128 15:45:32.120947 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-78mbd" event={"ID":"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b","Type":"ContainerDied","Data":"6aa6bd29accae665032efa8886f0ec5db3dad456a5157940edb15e3ba07f0b6f"} Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.549454 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.692679 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvh86\" (UniqueName: \"kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86\") pod \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.692792 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts\") pod \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.692819 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle\") pod \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.692930 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data\") pod \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\" (UID: \"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b\") " Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.736200 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts" (OuterVolumeSpecName: "scripts") pod "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" (UID: "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.736187 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86" (OuterVolumeSpecName: "kube-api-access-dvh86") pod "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" (UID: "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b"). InnerVolumeSpecName "kube-api-access-dvh86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.743547 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" (UID: "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.747005 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data" (OuterVolumeSpecName: "config-data") pod "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" (UID: "575d03bd-0f8a-4f54-b6cb-2fdd4b48365b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.794790 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.794824 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvh86\" (UniqueName: \"kubernetes.io/projected/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-kube-api-access-dvh86\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.794835 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:33 crc kubenswrapper[4647]: I1128 15:45:33.794846 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.179797 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-78mbd" event={"ID":"575d03bd-0f8a-4f54-b6cb-2fdd4b48365b","Type":"ContainerDied","Data":"6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464"} Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.180230 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cce4d8eff7f9ec55255f8fedd3f12197f65112e7f329ca40c1ae5ebbc375464" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.180318 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-78mbd" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.272329 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:34 crc kubenswrapper[4647]: E1128 15:45:34.272799 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" containerName="nova-cell0-conductor-db-sync" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.272819 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" containerName="nova-cell0-conductor-db-sync" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.273068 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" containerName="nova-cell0-conductor-db-sync" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.273808 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.275679 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8lpb6" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.277105 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.285712 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.415518 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.415566 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.415641 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-kube-api-access-jlkl8\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.518660 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-kube-api-access-jlkl8\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.518804 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.518846 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.541029 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.541552 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.541873 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/29b2bc60-27b4-48ee-b7d5-39a9c9648c03-kube-api-access-jlkl8\") pod \"nova-cell0-conductor-0\" (UID: \"29b2bc60-27b4-48ee-b7d5-39a9c9648c03\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:34 crc kubenswrapper[4647]: I1128 15:45:34.591246 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:35 crc kubenswrapper[4647]: I1128 15:45:35.041329 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:35 crc kubenswrapper[4647]: I1128 15:45:35.194233 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"29b2bc60-27b4-48ee-b7d5-39a9c9648c03","Type":"ContainerStarted","Data":"830a35ac798cedaef038409df181e030459c0fa0f5499b9cb6e6d2d86ac5a723"} Nov 28 15:45:36 crc kubenswrapper[4647]: I1128 15:45:36.216343 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"29b2bc60-27b4-48ee-b7d5-39a9c9648c03","Type":"ContainerStarted","Data":"665e4e39f778a92dff3752c42916d60ea3965d95b5e27930d444adda383a4c31"} Nov 28 15:45:36 crc kubenswrapper[4647]: I1128 15:45:36.218000 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:36 crc kubenswrapper[4647]: I1128 15:45:36.247249 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.247212159 podStartE2EDuration="2.247212159s" podCreationTimestamp="2025-11-28 15:45:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:36.246405138 +0000 UTC m=+1266.094011599" watchObservedRunningTime="2025-11-28 15:45:36.247212159 +0000 UTC m=+1266.094818580" Nov 28 15:45:37 crc kubenswrapper[4647]: I1128 15:45:37.671884 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:45:39 crc kubenswrapper[4647]: I1128 15:45:39.394578 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-66c6c46cdb-xgv7h" Nov 28 15:45:39 crc kubenswrapper[4647]: I1128 15:45:39.500315 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:45:39 crc kubenswrapper[4647]: I1128 15:45:39.500637 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon-log" containerID="cri-o://64b235df86da853e7bafc731cad556fae249bb1f53e1740afe1e53fb1763ad97" gracePeriod=30 Nov 28 15:45:39 crc kubenswrapper[4647]: I1128 15:45:39.501132 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" containerID="cri-o://337a6f06547910f852193d96b2d302984067b5cc092cdb2280e405c74a139b4e" gracePeriod=30 Nov 28 15:45:43 crc kubenswrapper[4647]: I1128 15:45:43.301960 4647 generic.go:334] "Generic (PLEG): container finished" podID="138caa60-71a7-49ba-9a82-42664b2b2276" 
containerID="337a6f06547910f852193d96b2d302984067b5cc092cdb2280e405c74a139b4e" exitCode=0 Nov 28 15:45:43 crc kubenswrapper[4647]: I1128 15:45:43.302022 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerDied","Data":"337a6f06547910f852193d96b2d302984067b5cc092cdb2280e405c74a139b4e"} Nov 28 15:45:44 crc kubenswrapper[4647]: I1128 15:45:44.643288 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.107094 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.135461 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-26mgb"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.136692 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.140014 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.140984 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.153560 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-26mgb"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.184018 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.184082 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.184155 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjxbj\" (UniqueName: \"kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.184187 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.285659 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.285715 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.285847 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjxbj\" (UniqueName: \"kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.285902 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.293662 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.294680 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.308011 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.327274 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjxbj\" (UniqueName: \"kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj\") pod \"nova-cell0-cell-mapping-26mgb\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.354685 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.356122 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.358938 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.371321 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.397856 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.397905 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkjln\" (UniqueName: \"kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.397941 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.466911 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.499598 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.499751 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.499776 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkjln\" (UniqueName: \"kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.514508 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.516110 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.521785 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.529061 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkjln\" (UniqueName: \"kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.529589 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.537740 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.555075 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.614121 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.615692 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.647546 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.648650 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.683184 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.695119 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.695319 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.699885 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714031 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714094 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714132 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714150 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714440 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714692 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714796 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2mdx\" (UniqueName: \"kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714899 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714924 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.714982 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lldhn\" (UniqueName: \"kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.715062 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkn64\" (UniqueName: \"kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.726782 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.817162 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkn64\" (UniqueName: \"kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827384 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827470 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827515 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827541 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827651 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827801 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827873 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2mdx\" (UniqueName: \"kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827949 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.827963 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.828001 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lldhn\" (UniqueName: \"kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.831928 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.832213 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.866736 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.867786 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.873449 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.875044 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.878700 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.878957 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.895355 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkn64\" (UniqueName: \"kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64\") pod \"nova-metadata-0\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " pod="openstack/nova-metadata-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.932725 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2mdx\" (UniqueName: \"kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx\") pod \"nova-scheduler-0\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " pod="openstack/nova-scheduler-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.941276 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lldhn\" (UniqueName: \"kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn\") pod \"nova-api-0\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.976120 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:45:45 crc kubenswrapper[4647]: I1128 15:45:45.990166 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.028315 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.057745 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.067944 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.146163 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243614 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243700 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243749 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243789 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243812 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.243851 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54r86\" (UniqueName: \"kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349320 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349357 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349421 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.349458 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54r86\" (UniqueName: \"kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.350633 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.351146 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.351664 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.352138 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.379631 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.443288 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54r86\" (UniqueName: 
\"kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86\") pod \"dnsmasq-dns-865f5d856f-nnd25\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.445633 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.561587 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-26mgb"] Nov 28 15:45:46 crc kubenswrapper[4647]: I1128 15:45:46.669298 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.022858 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.023508 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.088737 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.113955 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.284468 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.393458 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3d895171-8a1c-4611-8272-5f602dba88c3","Type":"ContainerStarted","Data":"9a930139201970e8fd13f6e7d74636a0d24f162b7e72d6fc0acb9cd8a905817d"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.395848 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerStarted","Data":"b898d7db0eb8b10e566cbddd39c6e47c6b068ab6b0ebf0981812e77f17c36429"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.397014 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"737eb72e-106d-46eb-901b-d7a6228fae4f","Type":"ContainerStarted","Data":"bc792844c3688925e8b8c952e9764667510f4f6165cd238274284f315eb03c17"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.401847 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-26mgb" event={"ID":"cba83c78-a603-47a1-89ff-09a1f68196aa","Type":"ContainerStarted","Data":"f3fb184438fcc931b389fcb7364228a987e5beeba81354ecd3e7d1020ce7821e"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.401885 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-26mgb" event={"ID":"cba83c78-a603-47a1-89ff-09a1f68196aa","Type":"ContainerStarted","Data":"1789c72dee64b207371a8e1cab5aa6d1a0d94cb8d379232e419fb5e85ebe6f12"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.404280 4647 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerStarted","Data":"727734a4a8c172c815d7ba8f536c803a13d3ab68c923709e2dfe4b82918939ed"} Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.431198 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-26mgb" podStartSLOduration=2.431180622 podStartE2EDuration="2.431180622s" podCreationTimestamp="2025-11-28 15:45:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:47.424805563 +0000 UTC m=+1277.272411984" watchObservedRunningTime="2025-11-28 15:45:47.431180622 +0000 UTC m=+1277.278787043" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.463069 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.655745 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xjx2f"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.662470 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.667770 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.668090 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.701498 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xjx2f"] Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.764784 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.764901 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5qfq\" (UniqueName: \"kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.764952 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.764977 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 
15:45:47.866887 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.867008 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5qfq\" (UniqueName: \"kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.867067 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.867091 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.874004 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.877238 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.886237 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:47 crc kubenswrapper[4647]: I1128 15:45:47.895169 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5qfq\" (UniqueName: \"kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq\") pod \"nova-cell1-conductor-db-sync-xjx2f\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:48 crc kubenswrapper[4647]: I1128 15:45:48.058370 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:45:48 crc kubenswrapper[4647]: I1128 15:45:48.482174 4647 generic.go:334] "Generic (PLEG): container finished" podID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerID="926a2795eff3e6f6073f0cea90ad0bed06e58ea39d463fd3e2199416230b52e4" exitCode=0 Nov 28 15:45:48 crc kubenswrapper[4647]: I1128 15:45:48.483519 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" event={"ID":"7b1c9cff-79b5-4c05-b180-95573c9ab889","Type":"ContainerDied","Data":"926a2795eff3e6f6073f0cea90ad0bed06e58ea39d463fd3e2199416230b52e4"} Nov 28 15:45:48 crc kubenswrapper[4647]: I1128 15:45:48.483548 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" event={"ID":"7b1c9cff-79b5-4c05-b180-95573c9ab889","Type":"ContainerStarted","Data":"928e895f0ed229fdbd0515e5008fb556d0945b2dd663785f832f44df1ead3051"} Nov 28 15:45:48 crc kubenswrapper[4647]: I1128 15:45:48.733685 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xjx2f"] Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.501813 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" event={"ID":"7b1c9cff-79b5-4c05-b180-95573c9ab889","Type":"ContainerStarted","Data":"4a1eb9a9a8b3713123364bc0d156340a60f06c57fd216610e314c78ca31c98b4"} Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.503660 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.510396 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" event={"ID":"2cd3da5e-0deb-4634-ac2e-24f2e31088c2","Type":"ContainerStarted","Data":"e7c368331fc1653673540fb85a4dfb18a7e8e4f5d3749a91d4c7184e2b405518"} Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.510466 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" event={"ID":"2cd3da5e-0deb-4634-ac2e-24f2e31088c2","Type":"ContainerStarted","Data":"8b9d1c48fc68364001eca90115715cbb15c7d1e6cd65b721ba9f909af3663c2c"} Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.533072 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" podStartSLOduration=4.533054992 podStartE2EDuration="4.533054992s" podCreationTimestamp="2025-11-28 15:45:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:49.528610714 +0000 UTC m=+1279.376217135" watchObservedRunningTime="2025-11-28 15:45:49.533054992 +0000 UTC m=+1279.380661413" Nov 28 15:45:49 crc kubenswrapper[4647]: I1128 15:45:49.554261 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" podStartSLOduration=2.5542421429999997 podStartE2EDuration="2.554242143s" podCreationTimestamp="2025-11-28 15:45:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:49.548905802 +0000 UTC m=+1279.396512223" watchObservedRunningTime="2025-11-28 15:45:49.554242143 +0000 UTC m=+1279.401848564" Nov 28 15:45:50 crc kubenswrapper[4647]: I1128 15:45:50.190357 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:50 crc kubenswrapper[4647]: I1128 15:45:50.202064 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:50 crc kubenswrapper[4647]: I1128 15:45:50.869366 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.581295 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"737eb72e-106d-46eb-901b-d7a6228fae4f","Type":"ContainerStarted","Data":"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.581856 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="737eb72e-106d-46eb-901b-d7a6228fae4f" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2" gracePeriod=30 Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.586090 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerStarted","Data":"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.586402 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerStarted","Data":"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.590254 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerStarted","Data":"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.590303 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerStarted","Data":"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.590470 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-log" containerID="cri-o://d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf" gracePeriod=30 Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.590726 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-metadata" containerID="cri-o://be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684" gracePeriod=30 Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.595699 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3d895171-8a1c-4611-8272-5f602dba88c3","Type":"ContainerStarted","Data":"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b"} Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.604649 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.6243743889999998 podStartE2EDuration="8.604630883s" podCreationTimestamp="2025-11-28 15:45:45 +0000 
UTC" firstStartedPulling="2025-11-28 15:45:46.718258199 +0000 UTC m=+1276.565864620" lastFinishedPulling="2025-11-28 15:45:52.698514693 +0000 UTC m=+1282.546121114" observedRunningTime="2025-11-28 15:45:53.600018921 +0000 UTC m=+1283.447625342" watchObservedRunningTime="2025-11-28 15:45:53.604630883 +0000 UTC m=+1283.452237304" Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.622603 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.210020801 podStartE2EDuration="8.622587199s" podCreationTimestamp="2025-11-28 15:45:45 +0000 UTC" firstStartedPulling="2025-11-28 15:45:47.295036836 +0000 UTC m=+1277.142643257" lastFinishedPulling="2025-11-28 15:45:52.707603234 +0000 UTC m=+1282.555209655" observedRunningTime="2025-11-28 15:45:53.619497627 +0000 UTC m=+1283.467104048" watchObservedRunningTime="2025-11-28 15:45:53.622587199 +0000 UTC m=+1283.470193620" Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.688420 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.110131356 podStartE2EDuration="8.688388862s" podCreationTimestamp="2025-11-28 15:45:45 +0000 UTC" firstStartedPulling="2025-11-28 15:45:47.121623923 +0000 UTC m=+1276.969230344" lastFinishedPulling="2025-11-28 15:45:52.699881429 +0000 UTC m=+1282.547487850" observedRunningTime="2025-11-28 15:45:53.655702706 +0000 UTC m=+1283.503309117" watchObservedRunningTime="2025-11-28 15:45:53.688388862 +0000 UTC m=+1283.535995283" Nov 28 15:45:53 crc kubenswrapper[4647]: I1128 15:45:53.695220 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.116825912 podStartE2EDuration="8.695203672s" podCreationTimestamp="2025-11-28 15:45:45 +0000 UTC" firstStartedPulling="2025-11-28 15:45:47.121594402 +0000 UTC m=+1276.969200823" lastFinishedPulling="2025-11-28 15:45:52.699972162 +0000 UTC m=+1282.547578583" observedRunningTime="2025-11-28 15:45:53.68871063 +0000 UTC m=+1283.536317051" watchObservedRunningTime="2025-11-28 15:45:53.695203672 +0000 UTC m=+1283.542810093" Nov 28 15:45:54 crc kubenswrapper[4647]: I1128 15:45:54.623436 4647 generic.go:334] "Generic (PLEG): container finished" podID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerID="d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf" exitCode=143 Nov 28 15:45:54 crc kubenswrapper[4647]: I1128 15:45:54.623655 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerDied","Data":"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf"} Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.110509 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.727598 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.976925 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.977035 4647 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.992864 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:45:55 crc kubenswrapper[4647]: I1128 15:45:55.992934 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.029322 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.029434 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.339206 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.449602 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.552108 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.552345 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="dnsmasq-dns" containerID="cri-o://b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4" gracePeriod=10 Nov 28 15:45:56 crc kubenswrapper[4647]: I1128 15:45:56.794384 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.060618 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.185:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.061510 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.185:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.380672 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.513539 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.513925 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.514024 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kd8k5\" (UniqueName: \"kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.514199 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.514308 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.514402 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb\") pod \"58346aae-6d36-4f76-9eb8-dc5b490c9346\" (UID: \"58346aae-6d36-4f76-9eb8-dc5b490c9346\") " Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.539798 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5" (OuterVolumeSpecName: "kube-api-access-kd8k5") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "kube-api-access-kd8k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.620171 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kd8k5\" (UniqueName: \"kubernetes.io/projected/58346aae-6d36-4f76-9eb8-dc5b490c9346-kube-api-access-kd8k5\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.655955 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.658035 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config" (OuterVolumeSpecName: "config") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.667646 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.669188 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.693179 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "58346aae-6d36-4f76-9eb8-dc5b490c9346" (UID: "58346aae-6d36-4f76-9eb8-dc5b490c9346"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.716452 4647 generic.go:334] "Generic (PLEG): container finished" podID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerID="b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4" exitCode=0 Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.717793 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.719779 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" event={"ID":"58346aae-6d36-4f76-9eb8-dc5b490c9346","Type":"ContainerDied","Data":"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4"} Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.719878 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-scbst" event={"ID":"58346aae-6d36-4f76-9eb8-dc5b490c9346","Type":"ContainerDied","Data":"70cc5fdbc688340e9460048ae75f8a334fbf9a8b3b2f428edf45c6e66ba64ee6"} Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.719904 4647 scope.go:117] "RemoveContainer" containerID="b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.722105 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.722133 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.722145 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.722157 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.722168 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58346aae-6d36-4f76-9eb8-dc5b490c9346-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.789391 4647 scope.go:117] "RemoveContainer" containerID="4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.835661 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.859887 4647 scope.go:117] "RemoveContainer" containerID="b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4" Nov 28 15:45:57 crc kubenswrapper[4647]: E1128 15:45:57.861352 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4\": container with ID starting with b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4 not found: ID does not exist" containerID="b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.861396 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4"} err="failed to get container status \"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4\": rpc error: code = NotFound desc = 
could not find container \"b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4\": container with ID starting with b3908bbc0213e1f8476b254d20aa00267b50187f3b8bf845656f0827e3fcc3b4 not found: ID does not exist" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.861446 4647 scope.go:117] "RemoveContainer" containerID="4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2" Nov 28 15:45:57 crc kubenswrapper[4647]: E1128 15:45:57.861767 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2\": container with ID starting with 4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2 not found: ID does not exist" containerID="4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.861807 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2"} err="failed to get container status \"4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2\": rpc error: code = NotFound desc = could not find container \"4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2\": container with ID starting with 4b2c860904185658c6ae5d446ed437047bbb799f2f46d9cc751c1e38a9df07c2 not found: ID does not exist" Nov 28 15:45:57 crc kubenswrapper[4647]: I1128 15:45:57.858984 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-scbst"] Nov 28 15:45:58 crc kubenswrapper[4647]: I1128 15:45:58.436078 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" path="/var/lib/kubelet/pods/58346aae-6d36-4f76-9eb8-dc5b490c9346/volumes" Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.022036 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.022692 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" containerName="kube-state-metrics" containerID="cri-o://2e75bde85ba51c8394b8568315f7a75ca517e7cb45987fe3d3a8fdd5fecd4f31" gracePeriod=30 Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.740917 4647 generic.go:334] "Generic (PLEG): container finished" podID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" containerID="2e75bde85ba51c8394b8568315f7a75ca517e7cb45987fe3d3a8fdd5fecd4f31" exitCode=2 Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.741025 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c","Type":"ContainerDied","Data":"2e75bde85ba51c8394b8568315f7a75ca517e7cb45987fe3d3a8fdd5fecd4f31"} Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.741397 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c","Type":"ContainerDied","Data":"0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8"} Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.741443 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b7af25522ec50df0a4c702e22b851c4a674f81f1936ff423b891f334665c9e8" Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.812801 4647 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.924349 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zt6xx\" (UniqueName: \"kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx\") pod \"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c\" (UID: \"4ff96da1-31d9-4b0c-8fbc-32c25a416a5c\") " Nov 28 15:45:59 crc kubenswrapper[4647]: I1128 15:45:59.934948 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx" (OuterVolumeSpecName: "kube-api-access-zt6xx") pod "4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" (UID: "4ff96da1-31d9-4b0c-8fbc-32c25a416a5c"). InnerVolumeSpecName "kube-api-access-zt6xx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.026769 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zt6xx\" (UniqueName: \"kubernetes.io/projected/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c-kube-api-access-zt6xx\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.752942 4647 generic.go:334] "Generic (PLEG): container finished" podID="cba83c78-a603-47a1-89ff-09a1f68196aa" containerID="f3fb184438fcc931b389fcb7364228a987e5beeba81354ecd3e7d1020ce7821e" exitCode=0 Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.753032 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-26mgb" event={"ID":"cba83c78-a603-47a1-89ff-09a1f68196aa","Type":"ContainerDied","Data":"f3fb184438fcc931b389fcb7364228a987e5beeba81354ecd3e7d1020ce7821e"} Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.753075 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.801696 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.815960 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.826475 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:46:00 crc kubenswrapper[4647]: E1128 15:46:00.827034 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="dnsmasq-dns" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.827057 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="dnsmasq-dns" Nov 28 15:46:00 crc kubenswrapper[4647]: E1128 15:46:00.827085 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="init" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.827097 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="init" Nov 28 15:46:00 crc kubenswrapper[4647]: E1128 15:46:00.827127 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" containerName="kube-state-metrics" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.827134 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" containerName="kube-state-metrics" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.827342 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="58346aae-6d36-4f76-9eb8-dc5b490c9346" containerName="dnsmasq-dns" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.827372 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" containerName="kube-state-metrics" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.828181 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.830758 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.833066 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.847451 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.944201 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.944243 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.944292 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:00 crc kubenswrapper[4647]: I1128 15:46:00.944519 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4nmr\" (UniqueName: \"kubernetes.io/projected/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-api-access-n4nmr\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.046614 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.046748 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4nmr\" (UniqueName: \"kubernetes.io/projected/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-api-access-n4nmr\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.046859 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.046887 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.054851 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.054964 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.056100 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.067233 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4nmr\" (UniqueName: \"kubernetes.io/projected/bbd46dc1-09cb-44e2-8150-a1f512a3efc9-kube-api-access-n4nmr\") pod \"kube-state-metrics-0\" (UID: \"bbd46dc1-09cb-44e2-8150-a1f512a3efc9\") " pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.142711 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.763455 4647 generic.go:334] "Generic (PLEG): container finished" podID="2cd3da5e-0deb-4634-ac2e-24f2e31088c2" containerID="e7c368331fc1653673540fb85a4dfb18a7e8e4f5d3749a91d4c7184e2b405518" exitCode=0 Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.763585 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" event={"ID":"2cd3da5e-0deb-4634-ac2e-24f2e31088c2","Type":"ContainerDied","Data":"e7c368331fc1653673540fb85a4dfb18a7e8e4f5d3749a91d4c7184e2b405518"} Nov 28 15:46:01 crc kubenswrapper[4647]: I1128 15:46:01.841791 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:46:01 crc kubenswrapper[4647]: W1128 15:46:01.850246 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbd46dc1_09cb_44e2_8150_a1f512a3efc9.slice/crio-787c140f5d97e704e844e1390187df8e3a69c606fa1e7cac37a4e5a6e9cc87ad WatchSource:0}: Error finding container 787c140f5d97e704e844e1390187df8e3a69c606fa1e7cac37a4e5a6e9cc87ad: Status 404 returned error can't find the container with id 787c140f5d97e704e844e1390187df8e3a69c606fa1e7cac37a4e5a6e9cc87ad Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.053674 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.170116 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data\") pod \"cba83c78-a603-47a1-89ff-09a1f68196aa\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.170383 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjxbj\" (UniqueName: \"kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj\") pod \"cba83c78-a603-47a1-89ff-09a1f68196aa\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.170598 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle\") pod \"cba83c78-a603-47a1-89ff-09a1f68196aa\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.170850 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts\") pod \"cba83c78-a603-47a1-89ff-09a1f68196aa\" (UID: \"cba83c78-a603-47a1-89ff-09a1f68196aa\") " Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.194186 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj" (OuterVolumeSpecName: "kube-api-access-sjxbj") pod "cba83c78-a603-47a1-89ff-09a1f68196aa" (UID: "cba83c78-a603-47a1-89ff-09a1f68196aa"). InnerVolumeSpecName "kube-api-access-sjxbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.194206 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts" (OuterVolumeSpecName: "scripts") pod "cba83c78-a603-47a1-89ff-09a1f68196aa" (UID: "cba83c78-a603-47a1-89ff-09a1f68196aa"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.238098 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.239508 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="sg-core" containerID="cri-o://7dfd6f011a97157423aca7d59f81386cfce513967871c6bf6b41873742fd08a8" gracePeriod=30 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.239656 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="proxy-httpd" containerID="cri-o://40116c0ed3cfc617663a083ac39b1e67782f7b53fea8398d0c1ae54dc22d2830" gracePeriod=30 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.242165 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-notification-agent" containerID="cri-o://6c616f699aed738bf9ba151465c7323e5b487e2060f034b4784719565dbc306e" gracePeriod=30 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.242511 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cba83c78-a603-47a1-89ff-09a1f68196aa" (UID: "cba83c78-a603-47a1-89ff-09a1f68196aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.245175 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-central-agent" containerID="cri-o://4721f4947ae95035b2353defffa22fc8bb998f65da646b5f23b17c56d7dcf58a" gracePeriod=30 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.272937 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjxbj\" (UniqueName: \"kubernetes.io/projected/cba83c78-a603-47a1-89ff-09a1f68196aa-kube-api-access-sjxbj\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.272983 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.272992 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.275932 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data" (OuterVolumeSpecName: "config-data") pod "cba83c78-a603-47a1-89ff-09a1f68196aa" (UID: "cba83c78-a603-47a1-89ff-09a1f68196aa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.376541 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba83c78-a603-47a1-89ff-09a1f68196aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.405645 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ff96da1-31d9-4b0c-8fbc-32c25a416a5c" path="/var/lib/kubelet/pods/4ff96da1-31d9-4b0c-8fbc-32c25a416a5c/volumes" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.775723 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bbd46dc1-09cb-44e2-8150-a1f512a3efc9","Type":"ContainerStarted","Data":"100c0d020ebcf078a17f52894ded987662cb5bfa82dd650e5cd96e697fddf918"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.775995 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bbd46dc1-09cb-44e2-8150-a1f512a3efc9","Type":"ContainerStarted","Data":"787c140f5d97e704e844e1390187df8e3a69c606fa1e7cac37a4e5a6e9cc87ad"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.776031 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.777693 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-26mgb" event={"ID":"cba83c78-a603-47a1-89ff-09a1f68196aa","Type":"ContainerDied","Data":"1789c72dee64b207371a8e1cab5aa6d1a0d94cb8d379232e419fb5e85ebe6f12"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.777741 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1789c72dee64b207371a8e1cab5aa6d1a0d94cb8d379232e419fb5e85ebe6f12" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.777816 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-26mgb" Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.782081 4647 generic.go:334] "Generic (PLEG): container finished" podID="e54a4531-a1ff-465a-9203-13881d25fc60" containerID="40116c0ed3cfc617663a083ac39b1e67782f7b53fea8398d0c1ae54dc22d2830" exitCode=0 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.782111 4647 generic.go:334] "Generic (PLEG): container finished" podID="e54a4531-a1ff-465a-9203-13881d25fc60" containerID="7dfd6f011a97157423aca7d59f81386cfce513967871c6bf6b41873742fd08a8" exitCode=2 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.782123 4647 generic.go:334] "Generic (PLEG): container finished" podID="e54a4531-a1ff-465a-9203-13881d25fc60" containerID="4721f4947ae95035b2353defffa22fc8bb998f65da646b5f23b17c56d7dcf58a" exitCode=0 Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.783269 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerDied","Data":"40116c0ed3cfc617663a083ac39b1e67782f7b53fea8398d0c1ae54dc22d2830"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.783300 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerDied","Data":"7dfd6f011a97157423aca7d59f81386cfce513967871c6bf6b41873742fd08a8"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.783313 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerDied","Data":"4721f4947ae95035b2353defffa22fc8bb998f65da646b5f23b17c56d7dcf58a"} Nov 28 15:46:02 crc kubenswrapper[4647]: I1128 15:46:02.823178 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.416092206 podStartE2EDuration="2.823159478s" podCreationTimestamp="2025-11-28 15:46:00 +0000 UTC" firstStartedPulling="2025-11-28 15:46:01.854488451 +0000 UTC m=+1291.702094872" lastFinishedPulling="2025-11-28 15:46:02.261555723 +0000 UTC m=+1292.109162144" observedRunningTime="2025-11-28 15:46:02.804285608 +0000 UTC m=+1292.651892029" watchObservedRunningTime="2025-11-28 15:46:02.823159478 +0000 UTC m=+1292.670765889" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.001919 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.002142 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" containerName="nova-scheduler-scheduler" containerID="cri-o://6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" gracePeriod=30 Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.036766 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.037010 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-log" containerID="cri-o://0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea" gracePeriod=30 Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.037430 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-api" containerID="cri-o://67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6" gracePeriod=30 Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.196303 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.316079 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle\") pod \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.316186 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5qfq\" (UniqueName: \"kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq\") pod \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.316255 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts\") pod \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.316393 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data\") pod \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\" (UID: \"2cd3da5e-0deb-4634-ac2e-24f2e31088c2\") " Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.343081 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts" (OuterVolumeSpecName: "scripts") pod "2cd3da5e-0deb-4634-ac2e-24f2e31088c2" (UID: "2cd3da5e-0deb-4634-ac2e-24f2e31088c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.343354 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq" (OuterVolumeSpecName: "kube-api-access-g5qfq") pod "2cd3da5e-0deb-4634-ac2e-24f2e31088c2" (UID: "2cd3da5e-0deb-4634-ac2e-24f2e31088c2"). InnerVolumeSpecName "kube-api-access-g5qfq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.384803 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data" (OuterVolumeSpecName: "config-data") pod "2cd3da5e-0deb-4634-ac2e-24f2e31088c2" (UID: "2cd3da5e-0deb-4634-ac2e-24f2e31088c2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.418690 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5qfq\" (UniqueName: \"kubernetes.io/projected/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-kube-api-access-g5qfq\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.418729 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.418739 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.481265 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2cd3da5e-0deb-4634-ac2e-24f2e31088c2" (UID: "2cd3da5e-0deb-4634-ac2e-24f2e31088c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.520358 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd3da5e-0deb-4634-ac2e-24f2e31088c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.797454 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" event={"ID":"2cd3da5e-0deb-4634-ac2e-24f2e31088c2","Type":"ContainerDied","Data":"8b9d1c48fc68364001eca90115715cbb15c7d1e6cd65b721ba9f909af3663c2c"} Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.797495 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b9d1c48fc68364001eca90115715cbb15c7d1e6cd65b721ba9f909af3663c2c" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.797494 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xjx2f" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.813888 4647 generic.go:334] "Generic (PLEG): container finished" podID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerID="0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea" exitCode=143 Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.814834 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerDied","Data":"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea"} Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.863051 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:46:03 crc kubenswrapper[4647]: E1128 15:46:03.863608 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba83c78-a603-47a1-89ff-09a1f68196aa" containerName="nova-manage" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.863634 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba83c78-a603-47a1-89ff-09a1f68196aa" containerName="nova-manage" Nov 28 15:46:03 crc kubenswrapper[4647]: E1128 15:46:03.863666 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd3da5e-0deb-4634-ac2e-24f2e31088c2" containerName="nova-cell1-conductor-db-sync" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.863674 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd3da5e-0deb-4634-ac2e-24f2e31088c2" containerName="nova-cell1-conductor-db-sync" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.863865 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd3da5e-0deb-4634-ac2e-24f2e31088c2" containerName="nova-cell1-conductor-db-sync" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.863883 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba83c78-a603-47a1-89ff-09a1f68196aa" containerName="nova-manage" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.864509 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.871895 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.880811 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.968349 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.968399 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpmfw\" (UniqueName: \"kubernetes.io/projected/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-kube-api-access-gpmfw\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:03 crc kubenswrapper[4647]: I1128 15:46:03.968469 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.070595 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.071170 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpmfw\" (UniqueName: \"kubernetes.io/projected/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-kube-api-access-gpmfw\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.071935 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.074804 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.077459 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.097035 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpmfw\" (UniqueName: \"kubernetes.io/projected/a641c2b8-f7d8-4829-8d49-d8eff2e2d132-kube-api-access-gpmfw\") pod \"nova-cell1-conductor-0\" (UID: \"a641c2b8-f7d8-4829-8d49-d8eff2e2d132\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.186174 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.789130 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:46:04 crc kubenswrapper[4647]: W1128 15:46:04.794661 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda641c2b8_f7d8_4829_8d49_d8eff2e2d132.slice/crio-a0a6447a0f4ede6755eeb7662d4f07bc07921023fbf708263c3a89f88c565efe WatchSource:0}: Error finding container a0a6447a0f4ede6755eeb7662d4f07bc07921023fbf708263c3a89f88c565efe: Status 404 returned error can't find the container with id a0a6447a0f4ede6755eeb7662d4f07bc07921023fbf708263c3a89f88c565efe Nov 28 15:46:04 crc kubenswrapper[4647]: I1128 15:46:04.823866 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a641c2b8-f7d8-4829-8d49-d8eff2e2d132","Type":"ContainerStarted","Data":"a0a6447a0f4ede6755eeb7662d4f07bc07921023fbf708263c3a89f88c565efe"} Nov 28 15:46:05 crc kubenswrapper[4647]: I1128 15:46:05.107322 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84947f5948-ml477" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 28 15:46:05 crc kubenswrapper[4647]: I1128 15:46:05.107449 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:46:05 crc kubenswrapper[4647]: I1128 15:46:05.834066 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a641c2b8-f7d8-4829-8d49-d8eff2e2d132","Type":"ContainerStarted","Data":"5275751ff690c3ed8d85c633468e142ab800bd7b8f1035b89242611bd9896249"} Nov 28 15:46:05 crc kubenswrapper[4647]: I1128 15:46:05.834360 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:05 crc kubenswrapper[4647]: I1128 15:46:05.850909 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.850880031 podStartE2EDuration="2.850880031s" podCreationTimestamp="2025-11-28 15:46:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:05.847330487 +0000 UTC m=+1295.694936898" watchObservedRunningTime="2025-11-28 15:46:05.850880031 +0000 UTC m=+1295.698486502" Nov 28 15:46:06 crc kubenswrapper[4647]: E1128 15:46:06.032494 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:06 crc kubenswrapper[4647]: E1128 
15:46:06.041999 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:06 crc kubenswrapper[4647]: E1128 15:46:06.043767 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:06 crc kubenswrapper[4647]: E1128 15:46:06.043814 4647 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" containerName="nova-scheduler-scheduler" Nov 28 15:46:06 crc kubenswrapper[4647]: I1128 15:46:06.879026 4647 generic.go:334] "Generic (PLEG): container finished" podID="e54a4531-a1ff-465a-9203-13881d25fc60" containerID="6c616f699aed738bf9ba151465c7323e5b487e2060f034b4784719565dbc306e" exitCode=0 Nov 28 15:46:06 crc kubenswrapper[4647]: I1128 15:46:06.879124 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerDied","Data":"6c616f699aed738bf9ba151465c7323e5b487e2060f034b4784719565dbc306e"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.286117 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.286250 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352186 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352325 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data\") pod \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352380 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352430 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352459 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs\") pod \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352503 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352518 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352538 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lldhn\" (UniqueName: \"kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn\") pod \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352579 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352622 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8krv\" (UniqueName: \"kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv\") pod \"e54a4531-a1ff-465a-9203-13881d25fc60\" (UID: \"e54a4531-a1ff-465a-9203-13881d25fc60\") " 
Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.352656 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle\") pod \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\" (UID: \"3391d1ed-d888-4fef-bec9-2c086a7e4aae\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.355611 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.356325 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs" (OuterVolumeSpecName: "logs") pod "3391d1ed-d888-4fef-bec9-2c086a7e4aae" (UID: "3391d1ed-d888-4fef-bec9-2c086a7e4aae"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.356665 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.374958 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn" (OuterVolumeSpecName: "kube-api-access-lldhn") pod "3391d1ed-d888-4fef-bec9-2c086a7e4aae" (UID: "3391d1ed-d888-4fef-bec9-2c086a7e4aae"). InnerVolumeSpecName "kube-api-access-lldhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.381380 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv" (OuterVolumeSpecName: "kube-api-access-f8krv") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "kube-api-access-f8krv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.383188 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts" (OuterVolumeSpecName: "scripts") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.391556 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3391d1ed-d888-4fef-bec9-2c086a7e4aae" (UID: "3391d1ed-d888-4fef-bec9-2c086a7e4aae"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.409109 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.425818 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data" (OuterVolumeSpecName: "config-data") pod "3391d1ed-d888-4fef-bec9-2c086a7e4aae" (UID: "3391d1ed-d888-4fef-bec9-2c086a7e4aae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456309 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456354 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lldhn\" (UniqueName: \"kubernetes.io/projected/3391d1ed-d888-4fef-bec9-2c086a7e4aae-kube-api-access-lldhn\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456366 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e54a4531-a1ff-465a-9203-13881d25fc60-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456377 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8krv\" (UniqueName: \"kubernetes.io/projected/e54a4531-a1ff-465a-9203-13881d25fc60-kube-api-access-f8krv\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456389 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456406 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456432 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3391d1ed-d888-4fef-bec9-2c086a7e4aae-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456443 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.456454 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3391d1ed-d888-4fef-bec9-2c086a7e4aae-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.478849 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle" (OuterVolumeSpecName: 
"combined-ca-bundle") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.545115 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data" (OuterVolumeSpecName: "config-data") pod "e54a4531-a1ff-465a-9203-13881d25fc60" (UID: "e54a4531-a1ff-465a-9203-13881d25fc60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.552946 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.558007 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.558027 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e54a4531-a1ff-465a-9203-13881d25fc60-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.666363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle\") pod \"3d895171-8a1c-4611-8272-5f602dba88c3\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.666618 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2mdx\" (UniqueName: \"kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx\") pod \"3d895171-8a1c-4611-8272-5f602dba88c3\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.666770 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data\") pod \"3d895171-8a1c-4611-8272-5f602dba88c3\" (UID: \"3d895171-8a1c-4611-8272-5f602dba88c3\") " Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.678096 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx" (OuterVolumeSpecName: "kube-api-access-k2mdx") pod "3d895171-8a1c-4611-8272-5f602dba88c3" (UID: "3d895171-8a1c-4611-8272-5f602dba88c3"). InnerVolumeSpecName "kube-api-access-k2mdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.724784 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data" (OuterVolumeSpecName: "config-data") pod "3d895171-8a1c-4611-8272-5f602dba88c3" (UID: "3d895171-8a1c-4611-8272-5f602dba88c3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.730612 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d895171-8a1c-4611-8272-5f602dba88c3" (UID: "3d895171-8a1c-4611-8272-5f602dba88c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.770303 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2mdx\" (UniqueName: \"kubernetes.io/projected/3d895171-8a1c-4611-8272-5f602dba88c3-kube-api-access-k2mdx\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.770593 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.770674 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d895171-8a1c-4611-8272-5f602dba88c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.890809 4647 generic.go:334] "Generic (PLEG): container finished" podID="3d895171-8a1c-4611-8272-5f602dba88c3" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" exitCode=0 Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.890899 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3d895171-8a1c-4611-8272-5f602dba88c3","Type":"ContainerDied","Data":"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.891219 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3d895171-8a1c-4611-8272-5f602dba88c3","Type":"ContainerDied","Data":"9a930139201970e8fd13f6e7d74636a0d24f162b7e72d6fc0acb9cd8a905817d"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.891286 4647 scope.go:117] "RemoveContainer" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.890958 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.895652 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e54a4531-a1ff-465a-9203-13881d25fc60","Type":"ContainerDied","Data":"920bc0fdb164ff9b05c04a81a91481190e58a75e29e34e1dff9a059cab796c71"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.895772 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.902378 4647 generic.go:334] "Generic (PLEG): container finished" podID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerID="67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6" exitCode=0 Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.902455 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerDied","Data":"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.902488 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3391d1ed-d888-4fef-bec9-2c086a7e4aae","Type":"ContainerDied","Data":"b898d7db0eb8b10e566cbddd39c6e47c6b068ab6b0ebf0981812e77f17c36429"} Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.902598 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.923428 4647 scope.go:117] "RemoveContainer" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.926973 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b\": container with ID starting with 6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b not found: ID does not exist" containerID="6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.927052 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b"} err="failed to get container status \"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b\": rpc error: code = NotFound desc = could not find container \"6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b\": container with ID starting with 6a63bd2c636b801efe2322bb0035e88b49fc9d5354f36b1b06a7d886214fab2b not found: ID does not exist" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.927082 4647 scope.go:117] "RemoveContainer" containerID="40116c0ed3cfc617663a083ac39b1e67782f7b53fea8398d0c1ae54dc22d2830" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.943593 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.953797 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.969438 4647 scope.go:117] "RemoveContainer" containerID="7dfd6f011a97157423aca7d59f81386cfce513967871c6bf6b41873742fd08a8" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.982663 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983273 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="proxy-httpd" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983301 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="proxy-httpd" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 
15:46:07.983338 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="sg-core" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983349 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="sg-core" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983362 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-central-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983371 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-central-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983390 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-notification-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983398 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-notification-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983431 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" containerName="nova-scheduler-scheduler" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983440 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" containerName="nova-scheduler-scheduler" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983457 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-api" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983466 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-api" Nov 28 15:46:07 crc kubenswrapper[4647]: E1128 15:46:07.983485 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-log" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983494 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-log" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983735 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="sg-core" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983758 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-notification-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983773 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-log" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983795 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="proxy-httpd" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983809 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" containerName="ceilometer-central-agent" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983820 4647 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" containerName="nova-api-api" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.983835 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" containerName="nova-scheduler-scheduler" Nov 28 15:46:07 crc kubenswrapper[4647]: I1128 15:46:07.994129 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.004001 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.004221 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.004360 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.004560 4647 scope.go:117] "RemoveContainer" containerID="6c616f699aed738bf9ba151465c7323e5b487e2060f034b4784719565dbc306e" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.004747 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.020731 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.033900 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.041735 4647 scope.go:117] "RemoveContainer" containerID="4721f4947ae95035b2353defffa22fc8bb998f65da646b5f23b17c56d7dcf58a" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.049533 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.059613 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.061811 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.067460 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075007 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075151 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075362 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075465 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pljv8\" (UniqueName: \"kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075545 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075731 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.075896 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.083751 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.096790 4647 scope.go:117] "RemoveContainer" containerID="67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6" Nov 28 
15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.108366 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.112996 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.120364 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.132740 4647 scope.go:117] "RemoveContainer" containerID="0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.146273 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.156100 4647 scope.go:117] "RemoveContainer" containerID="67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6" Nov 28 15:46:08 crc kubenswrapper[4647]: E1128 15:46:08.156666 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6\": container with ID starting with 67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6 not found: ID does not exist" containerID="67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.156722 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6"} err="failed to get container status \"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6\": rpc error: code = NotFound desc = could not find container \"67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6\": container with ID starting with 67f63550e305eafa62a09dfbdf8dd2ebf8ef4a6ebf81f3b5a8ade49ca94c79f6 not found: ID does not exist" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.156747 4647 scope.go:117] "RemoveContainer" containerID="0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea" Nov 28 15:46:08 crc kubenswrapper[4647]: E1128 15:46:08.157015 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea\": container with ID starting with 0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea not found: ID does not exist" containerID="0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.157037 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea"} err="failed to get container status \"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea\": rpc error: code = NotFound desc = could not find container \"0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea\": container with ID starting with 0b9a0174f6e3efacc3dd52880e78fb9a53ac0152328e30d1927a19c12b3b9aea not found: ID does not exist" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.167841 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.178614 4647 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.178779 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.178926 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179020 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179099 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179199 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179293 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179364 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d92ck\" (UniqueName: \"kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179457 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179537 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pljv8\" (UniqueName: \"kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " 
pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179625 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179717 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179789 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179874 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.179939 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44xkm\" (UniqueName: \"kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.180481 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.180898 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.184125 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.185637 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.187241 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" 
Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.190592 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.190871 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.218932 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pljv8\" (UniqueName: \"kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8\") pod \"ceilometer-0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.281857 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282664 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282687 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d92ck\" (UniqueName: \"kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282738 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282768 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282784 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44xkm\" (UniqueName: \"kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.282817 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " 
pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.283525 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.286538 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.287033 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.287333 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.287728 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.304280 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44xkm\" (UniqueName: \"kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm\") pod \"nova-api-0\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.305312 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d92ck\" (UniqueName: \"kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck\") pod \"nova-scheduler-0\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.324163 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.383304 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.411730 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3391d1ed-d888-4fef-bec9-2c086a7e4aae" path="/var/lib/kubelet/pods/3391d1ed-d888-4fef-bec9-2c086a7e4aae/volumes" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.412343 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d895171-8a1c-4611-8272-5f602dba88c3" path="/var/lib/kubelet/pods/3d895171-8a1c-4611-8272-5f602dba88c3/volumes" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.412923 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e54a4531-a1ff-465a-9203-13881d25fc60" path="/var/lib/kubelet/pods/e54a4531-a1ff-465a-9203-13881d25fc60/volumes" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.431513 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.872008 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:08 crc kubenswrapper[4647]: I1128 15:46:08.924303 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerStarted","Data":"2ef50c816bc0f121bc3ccdab174dca1918beef82beaf7e7b1b2427e3ce0ae0a7"} Nov 28 15:46:09 crc kubenswrapper[4647]: W1128 15:46:09.040836 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb62800be_3750_4388_b919_a8ad1373a9b4.slice/crio-41674f62d16d13961f960054b6d391684b5e42160abb2dc5773d5c80953203db WatchSource:0}: Error finding container 41674f62d16d13961f960054b6d391684b5e42160abb2dc5773d5c80953203db: Status 404 returned error can't find the container with id 41674f62d16d13961f960054b6d391684b5e42160abb2dc5773d5c80953203db Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.046564 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.053381 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.224256 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.956329 4647 generic.go:334] "Generic (PLEG): container finished" podID="138caa60-71a7-49ba-9a82-42664b2b2276" containerID="64b235df86da853e7bafc731cad556fae249bb1f53e1740afe1e53fb1763ad97" exitCode=137 Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.956720 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerDied","Data":"64b235df86da853e7bafc731cad556fae249bb1f53e1740afe1e53fb1763ad97"} Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.978920 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerStarted","Data":"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089"} Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.979275 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerStarted","Data":"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4"} Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.979286 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerStarted","Data":"41674f62d16d13961f960054b6d391684b5e42160abb2dc5773d5c80953203db"} Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.993422 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3","Type":"ContainerStarted","Data":"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0"} Nov 28 15:46:09 crc kubenswrapper[4647]: I1128 15:46:09.993475 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3","Type":"ContainerStarted","Data":"f8b921c9c9e34e22c0ed90eec1516b76e976bafb36d053695a0cf639a11908a2"} Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.015304 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.015289642 podStartE2EDuration="3.015289642s" podCreationTimestamp="2025-11-28 15:46:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:10.014312526 +0000 UTC m=+1299.861918947" watchObservedRunningTime="2025-11-28 15:46:10.015289642 +0000 UTC m=+1299.862896063" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.076187 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.076169247 podStartE2EDuration="2.076169247s" podCreationTimestamp="2025-11-28 15:46:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:10.041447826 +0000 UTC m=+1299.889054247" watchObservedRunningTime="2025-11-28 15:46:10.076169247 +0000 UTC m=+1299.923775668" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.214480 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.362985 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.363122 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.363804 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xxwc\" (UniqueName: \"kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.363845 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.363896 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.363980 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.364076 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key\") pod \"138caa60-71a7-49ba-9a82-42664b2b2276\" (UID: \"138caa60-71a7-49ba-9a82-42664b2b2276\") " Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.365926 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs" (OuterVolumeSpecName: "logs") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.370554 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc" (OuterVolumeSpecName: "kube-api-access-8xxwc") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "kube-api-access-8xxwc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.375019 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.391113 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data" (OuterVolumeSpecName: "config-data") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.432248 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts" (OuterVolumeSpecName: "scripts") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.471460 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xxwc\" (UniqueName: \"kubernetes.io/projected/138caa60-71a7-49ba-9a82-42664b2b2276-kube-api-access-8xxwc\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.471768 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.471778 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/138caa60-71a7-49ba-9a82-42664b2b2276-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.471789 4647 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.471811 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/138caa60-71a7-49ba-9a82-42664b2b2276-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.498907 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.502844 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "138caa60-71a7-49ba-9a82-42664b2b2276" (UID: "138caa60-71a7-49ba-9a82-42664b2b2276"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.573757 4647 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:10 crc kubenswrapper[4647]: I1128 15:46:10.573790 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/138caa60-71a7-49ba-9a82-42664b2b2276-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.004655 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerStarted","Data":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.004707 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerStarted","Data":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.007033 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84947f5948-ml477" event={"ID":"138caa60-71a7-49ba-9a82-42664b2b2276","Type":"ContainerDied","Data":"d248e4b4608f30316e3dc85463f78bf90125565718729d2e80c09bd3dbba0470"} Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.007070 4647 scope.go:117] "RemoveContainer" containerID="337a6f06547910f852193d96b2d302984067b5cc092cdb2280e405c74a139b4e" Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.007180 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84947f5948-ml477" Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.042870 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.049185 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84947f5948-ml477"] Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.152593 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 15:46:11 crc kubenswrapper[4647]: I1128 15:46:11.223826 4647 scope.go:117] "RemoveContainer" containerID="64b235df86da853e7bafc731cad556fae249bb1f53e1740afe1e53fb1763ad97" Nov 28 15:46:12 crc kubenswrapper[4647]: I1128 15:46:12.017467 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerStarted","Data":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} Nov 28 15:46:12 crc kubenswrapper[4647]: I1128 15:46:12.404617 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" path="/var/lib/kubelet/pods/138caa60-71a7-49ba-9a82-42664b2b2276/volumes" Nov 28 15:46:13 crc kubenswrapper[4647]: I1128 15:46:13.432588 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:46:14 crc kubenswrapper[4647]: I1128 15:46:14.061172 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerStarted","Data":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} Nov 28 15:46:14 crc kubenswrapper[4647]: I1128 15:46:14.061338 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:46:14 crc kubenswrapper[4647]: I1128 15:46:14.085748 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.021674591 podStartE2EDuration="7.085730487s" podCreationTimestamp="2025-11-28 15:46:07 +0000 UTC" firstStartedPulling="2025-11-28 15:46:08.891128472 +0000 UTC m=+1298.738734893" lastFinishedPulling="2025-11-28 15:46:12.955184358 +0000 UTC m=+1302.802790789" observedRunningTime="2025-11-28 15:46:14.081837244 +0000 UTC m=+1303.929443665" watchObservedRunningTime="2025-11-28 15:46:14.085730487 +0000 UTC m=+1303.933336908" Nov 28 15:46:17 crc kubenswrapper[4647]: I1128 15:46:17.023370 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:46:17 crc kubenswrapper[4647]: I1128 15:46:17.024067 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:46:17 crc kubenswrapper[4647]: I1128 15:46:17.024130 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:46:17 crc kubenswrapper[4647]: I1128 15:46:17.024897 4647 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:46:17 crc kubenswrapper[4647]: I1128 15:46:17.024955 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b" gracePeriod=600 Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.110364 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b" exitCode=0 Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.110508 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b"} Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.110882 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"} Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.110917 4647 scope.go:117] "RemoveContainer" containerID="0d6ebaf8cb633650448f2badcf640129d01a9742c40868864eb5611603a41a80" Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.383917 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.384329 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.432607 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:46:18 crc kubenswrapper[4647]: I1128 15:46:18.467126 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:46:19 crc kubenswrapper[4647]: I1128 15:46:19.160999 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:46:19 crc kubenswrapper[4647]: I1128 15:46:19.466731 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:19 crc kubenswrapper[4647]: I1128 15:46:19.466745 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.129314 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.135809 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.188639 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data\") pod \"737eb72e-106d-46eb-901b-d7a6228fae4f\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.188937 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkn64\" (UniqueName: \"kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64\") pod \"3975effe-4c58-4b62-b43f-ee25533e6de2\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.189127 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle\") pod \"737eb72e-106d-46eb-901b-d7a6228fae4f\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.240560 4647 generic.go:334] "Generic (PLEG): container finished" podID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerID="be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684" exitCode=137 Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.240699 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.240750 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerDied","Data":"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684"} Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.240792 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3975effe-4c58-4b62-b43f-ee25533e6de2","Type":"ContainerDied","Data":"727734a4a8c172c815d7ba8f536c803a13d3ab68c923709e2dfe4b82918939ed"} Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.240834 4647 scope.go:117] "RemoveContainer" containerID="be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.241730 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64" (OuterVolumeSpecName: "kube-api-access-xkn64") pod "3975effe-4c58-4b62-b43f-ee25533e6de2" (UID: "3975effe-4c58-4b62-b43f-ee25533e6de2"). InnerVolumeSpecName "kube-api-access-xkn64". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.245670 4647 generic.go:334] "Generic (PLEG): container finished" podID="737eb72e-106d-46eb-901b-d7a6228fae4f" containerID="3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2" exitCode=137 Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.245795 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"737eb72e-106d-46eb-901b-d7a6228fae4f","Type":"ContainerDied","Data":"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2"} Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.245884 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"737eb72e-106d-46eb-901b-d7a6228fae4f","Type":"ContainerDied","Data":"bc792844c3688925e8b8c952e9764667510f4f6165cd238274284f315eb03c17"} Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.245999 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.252539 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "737eb72e-106d-46eb-901b-d7a6228fae4f" (UID: "737eb72e-106d-46eb-901b-d7a6228fae4f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.257569 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data" (OuterVolumeSpecName: "config-data") pod "737eb72e-106d-46eb-901b-d7a6228fae4f" (UID: "737eb72e-106d-46eb-901b-d7a6228fae4f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.271160 4647 scope.go:117] "RemoveContainer" containerID="d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.290721 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle\") pod \"3975effe-4c58-4b62-b43f-ee25533e6de2\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.290868 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs\") pod \"3975effe-4c58-4b62-b43f-ee25533e6de2\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.291068 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkjln\" (UniqueName: \"kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln\") pod \"737eb72e-106d-46eb-901b-d7a6228fae4f\" (UID: \"737eb72e-106d-46eb-901b-d7a6228fae4f\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.291338 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs" (OuterVolumeSpecName: "logs") pod "3975effe-4c58-4b62-b43f-ee25533e6de2" (UID: "3975effe-4c58-4b62-b43f-ee25533e6de2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.291356 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data\") pod \"3975effe-4c58-4b62-b43f-ee25533e6de2\" (UID: \"3975effe-4c58-4b62-b43f-ee25533e6de2\") " Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.292372 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.292395 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkn64\" (UniqueName: \"kubernetes.io/projected/3975effe-4c58-4b62-b43f-ee25533e6de2-kube-api-access-xkn64\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.292422 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/737eb72e-106d-46eb-901b-d7a6228fae4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.292432 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3975effe-4c58-4b62-b43f-ee25533e6de2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.295823 4647 scope.go:117] "RemoveContainer" containerID="be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.296488 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684\": 
container with ID starting with be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684 not found: ID does not exist" containerID="be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.296525 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684"} err="failed to get container status \"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684\": rpc error: code = NotFound desc = could not find container \"be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684\": container with ID starting with be4d504b3ec4f8800faafa94dfc4e867129f3685698955d6bec986c01a8c8684 not found: ID does not exist" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.296556 4647 scope.go:117] "RemoveContainer" containerID="d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.297612 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf\": container with ID starting with d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf not found: ID does not exist" containerID="d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.297643 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf"} err="failed to get container status \"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf\": rpc error: code = NotFound desc = could not find container \"d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf\": container with ID starting with d18146e5ae1a6368fe55dac5100c2a7b514d172c434cd1e88afb55b0ba5b7dcf not found: ID does not exist" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.297665 4647 scope.go:117] "RemoveContainer" containerID="3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.298068 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln" (OuterVolumeSpecName: "kube-api-access-vkjln") pod "737eb72e-106d-46eb-901b-d7a6228fae4f" (UID: "737eb72e-106d-46eb-901b-d7a6228fae4f"). InnerVolumeSpecName "kube-api-access-vkjln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.318064 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3975effe-4c58-4b62-b43f-ee25533e6de2" (UID: "3975effe-4c58-4b62-b43f-ee25533e6de2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.321731 4647 scope.go:117] "RemoveContainer" containerID="3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.322642 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2\": container with ID starting with 3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2 not found: ID does not exist" containerID="3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.322715 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2"} err="failed to get container status \"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2\": rpc error: code = NotFound desc = could not find container \"3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2\": container with ID starting with 3f821491912b78ce5547f72e81133e7a6130ff377b1e8bfab1af13e9478b65a2 not found: ID does not exist" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.334720 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data" (OuterVolumeSpecName: "config-data") pod "3975effe-4c58-4b62-b43f-ee25533e6de2" (UID: "3975effe-4c58-4b62-b43f-ee25533e6de2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.394976 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.395017 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3975effe-4c58-4b62-b43f-ee25533e6de2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.395029 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkjln\" (UniqueName: \"kubernetes.io/projected/737eb72e-106d-46eb-901b-d7a6228fae4f-kube-api-access-vkjln\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.571808 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.581332 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.591351 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.609394 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.631515 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.632186 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-log" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 
15:46:24.632212 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-log" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.632236 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon-log" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632247 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon-log" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.632256 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632263 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.632296 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="737eb72e-106d-46eb-901b-d7a6228fae4f" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632303 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="737eb72e-106d-46eb-901b-d7a6228fae4f" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 15:46:24 crc kubenswrapper[4647]: E1128 15:46:24.632323 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-metadata" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632329 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-metadata" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632591 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632627 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-metadata" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632637 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="737eb72e-106d-46eb-901b-d7a6228fae4f" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632649 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="138caa60-71a7-49ba-9a82-42664b2b2276" containerName="horizon-log" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.632663 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" containerName="nova-metadata-log" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.633998 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.640546 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.642307 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.645116 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.645128 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.645339 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.645465 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.648924 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.649089 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.680985 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.803955 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804038 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804080 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804120 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804190 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804238 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkvj4\" (UniqueName: \"kubernetes.io/projected/ee715495-598f-4c76-9399-92846d682bbe-kube-api-access-dkvj4\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804269 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk2m7\" (UniqueName: \"kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804319 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804363 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.804435 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.906608 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.906837 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.906920 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkvj4\" (UniqueName: \"kubernetes.io/projected/ee715495-598f-4c76-9399-92846d682bbe-kube-api-access-dkvj4\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.906972 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk2m7\" (UniqueName: \"kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907060 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc 
kubenswrapper[4647]: I1128 15:46:24.907138 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907217 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907303 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907346 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907390 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.907512 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.913256 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.913521 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.923157 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.923547 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.924598 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.926651 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.924483 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee715495-598f-4c76-9399-92846d682bbe-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.932456 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkvj4\" (UniqueName: \"kubernetes.io/projected/ee715495-598f-4c76-9399-92846d682bbe-kube-api-access-dkvj4\") pod \"nova-cell1-novncproxy-0\" (UID: \"ee715495-598f-4c76-9399-92846d682bbe\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:24 crc kubenswrapper[4647]: I1128 15:46:24.934624 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk2m7\" (UniqueName: \"kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7\") pod \"nova-metadata-0\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " pod="openstack/nova-metadata-0" Nov 28 15:46:25 crc kubenswrapper[4647]: I1128 15:46:25.024125 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:25 crc kubenswrapper[4647]: I1128 15:46:25.041976 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:25 crc kubenswrapper[4647]: I1128 15:46:25.587904 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:25 crc kubenswrapper[4647]: W1128 15:46:25.594873 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21030524_3ea9_4d2c_b620_82c5f58d66bf.slice/crio-c433ab2e8e35654e9a64460dc8879e03aaf1aafcfb982153c1b19f0e88f39bf2 WatchSource:0}: Error finding container c433ab2e8e35654e9a64460dc8879e03aaf1aafcfb982153c1b19f0e88f39bf2: Status 404 returned error can't find the container with id c433ab2e8e35654e9a64460dc8879e03aaf1aafcfb982153c1b19f0e88f39bf2 Nov 28 15:46:25 crc kubenswrapper[4647]: I1128 15:46:25.772979 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.283133 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ee715495-598f-4c76-9399-92846d682bbe","Type":"ContainerStarted","Data":"bbe89b6a57890dd61b28238baa621be895a176b05aa2272be458a8cc57f0ce4f"} Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.283194 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ee715495-598f-4c76-9399-92846d682bbe","Type":"ContainerStarted","Data":"eb4d3b7c213ba223ea8bf6f56bd0e1ab7ccdd66038d9fd3944bf0b476842c50f"} Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.286744 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerStarted","Data":"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b"} Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.286798 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerStarted","Data":"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885"} Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.286813 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerStarted","Data":"c433ab2e8e35654e9a64460dc8879e03aaf1aafcfb982153c1b19f0e88f39bf2"} Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.311844 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.311822324 podStartE2EDuration="2.311822324s" podCreationTimestamp="2025-11-28 15:46:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:26.305712822 +0000 UTC m=+1316.153319263" watchObservedRunningTime="2025-11-28 15:46:26.311822324 +0000 UTC m=+1316.159428745" Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.336296 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.336276503 podStartE2EDuration="2.336276503s" podCreationTimestamp="2025-11-28 15:46:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:26.328684091 +0000 UTC m=+1316.176290522" watchObservedRunningTime="2025-11-28 15:46:26.336276503 +0000 UTC 
m=+1316.183882924" Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.406211 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3975effe-4c58-4b62-b43f-ee25533e6de2" path="/var/lib/kubelet/pods/3975effe-4c58-4b62-b43f-ee25533e6de2/volumes" Nov 28 15:46:26 crc kubenswrapper[4647]: I1128 15:46:26.407196 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="737eb72e-106d-46eb-901b-d7a6228fae4f" path="/var/lib/kubelet/pods/737eb72e-106d-46eb-901b-d7a6228fae4f/volumes" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.423976 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.424671 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.425148 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.425216 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.431079 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.436785 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.715390 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.717031 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.748987 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.807929 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.807991 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.808037 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.808093 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75qpl\" (UniqueName: \"kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: 
\"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.808127 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.808317 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.910836 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.910903 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.910941 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75qpl\" (UniqueName: \"kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.910965 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.911070 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.911179 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.911769 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: 
\"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.915531 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.916834 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.916973 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.917853 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:28 crc kubenswrapper[4647]: I1128 15:46:28.936153 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75qpl\" (UniqueName: \"kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl\") pod \"dnsmasq-dns-5c7b6c5df9-vgkth\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:29 crc kubenswrapper[4647]: I1128 15:46:29.069706 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:29 crc kubenswrapper[4647]: I1128 15:46:29.667204 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.025540 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.025586 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.042377 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.341038 4647 generic.go:334] "Generic (PLEG): container finished" podID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerID="19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5" exitCode=0 Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.342429 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" event={"ID":"3170fd20-182e-4cc7-b636-d6173b6c7f08","Type":"ContainerDied","Data":"19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5"} Nov 28 15:46:30 crc kubenswrapper[4647]: I1128 15:46:30.342469 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" event={"ID":"3170fd20-182e-4cc7-b636-d6173b6c7f08","Type":"ContainerStarted","Data":"1811300758efed455c221aa2827e63d8677bb8c9b11551ff30e668df70bdf458"} Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.353234 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" event={"ID":"3170fd20-182e-4cc7-b636-d6173b6c7f08","Type":"ContainerStarted","Data":"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704"} Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.354328 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.380954 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.381540 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-central-agent" containerID="cri-o://515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" gracePeriod=30 Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.383405 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="proxy-httpd" containerID="cri-o://f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" gracePeriod=30 Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.383579 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="sg-core" containerID="cri-o://19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" gracePeriod=30 Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.383698 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-notification-agent" 
containerID="cri-o://1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" gracePeriod=30 Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.386384 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" podStartSLOduration=3.386363645 podStartE2EDuration="3.386363645s" podCreationTimestamp="2025-11-28 15:46:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:31.380128039 +0000 UTC m=+1321.227734480" watchObservedRunningTime="2025-11-28 15:46:31.386363645 +0000 UTC m=+1321.233970066" Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.396434 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.192:3000/\": EOF" Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.533826 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.534054 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-log" containerID="cri-o://23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4" gracePeriod=30 Nov 28 15:46:31 crc kubenswrapper[4647]: I1128 15:46:31.534470 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-api" containerID="cri-o://b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089" gracePeriod=30 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.236251 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.366592 4647 generic.go:334] "Generic (PLEG): container finished" podID="b62800be-3750-4388-b919-a8ad1373a9b4" containerID="23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4" exitCode=143 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.366655 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerDied","Data":"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.369917 4647 generic.go:334] "Generic (PLEG): container finished" podID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" exitCode=0 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.369941 4647 generic.go:334] "Generic (PLEG): container finished" podID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" exitCode=2 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.369949 4647 generic.go:334] "Generic (PLEG): container finished" podID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" exitCode=0 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.369956 4647 generic.go:334] "Generic (PLEG): container finished" podID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" exitCode=0 Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370826 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370874 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerDied","Data":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370932 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerDied","Data":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370946 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerDied","Data":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370956 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerDied","Data":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370972 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9be811f3-c9ec-4cc0-842f-3c831cdae7f0","Type":"ContainerDied","Data":"2ef50c816bc0f121bc3ccdab174dca1918beef82beaf7e7b1b2427e3ce0ae0a7"} Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.370992 4647 scope.go:117] "RemoveContainer" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.395143 4647 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.395610 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.395719 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.395843 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.395896 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.396112 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pljv8\" (UniqueName: \"kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.396196 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.396381 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.396658 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.396967 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts\") pod \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\" (UID: \"9be811f3-c9ec-4cc0-842f-3c831cdae7f0\") " Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.397710 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.397776 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.409915 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8" (OuterVolumeSpecName: "kube-api-access-pljv8") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "kube-api-access-pljv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.412070 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts" (OuterVolumeSpecName: "scripts") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.426555 4647 scope.go:117] "RemoveContainer" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.478477 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.479495 4647 scope.go:117] "RemoveContainer" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.499924 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.503166 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.503257 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pljv8\" (UniqueName: \"kubernetes.io/projected/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-kube-api-access-pljv8\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.503400 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.512331 4647 scope.go:117] "RemoveContainer" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.533227 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.576881 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data" (OuterVolumeSpecName: "config-data") pod "9be811f3-c9ec-4cc0-842f-3c831cdae7f0" (UID: "9be811f3-c9ec-4cc0-842f-3c831cdae7f0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.605701 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.605742 4647 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.605757 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be811f3-c9ec-4cc0-842f-3c831cdae7f0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.688138 4647 scope.go:117] "RemoveContainer" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.688741 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": container with ID starting with f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217 not found: ID does not exist" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.688834 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} err="failed to get container status \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": rpc error: code = NotFound desc = could not find container \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": container with ID starting with f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.688932 4647 scope.go:117] "RemoveContainer" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.689470 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": container with ID starting with 19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384 not found: ID does not exist" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.689504 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} err="failed to get container status \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": rpc error: code = NotFound desc = could not find container \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": container with ID starting with 19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.689525 4647 scope.go:117] "RemoveContainer" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc 
kubenswrapper[4647]: E1128 15:46:32.689844 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": container with ID starting with 1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3 not found: ID does not exist" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.689932 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} err="failed to get container status \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": rpc error: code = NotFound desc = could not find container \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": container with ID starting with 1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.690006 4647 scope.go:117] "RemoveContainer" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.690360 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": container with ID starting with 515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9 not found: ID does not exist" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.690419 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} err="failed to get container status \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": rpc error: code = NotFound desc = could not find container \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": container with ID starting with 515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.690449 4647 scope.go:117] "RemoveContainer" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.690796 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} err="failed to get container status \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": rpc error: code = NotFound desc = could not find container \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": container with ID starting with f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.690822 4647 scope.go:117] "RemoveContainer" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691039 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} err="failed to get container status 
\"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": rpc error: code = NotFound desc = could not find container \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": container with ID starting with 19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691060 4647 scope.go:117] "RemoveContainer" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691240 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} err="failed to get container status \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": rpc error: code = NotFound desc = could not find container \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": container with ID starting with 1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691259 4647 scope.go:117] "RemoveContainer" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691444 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} err="failed to get container status \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": rpc error: code = NotFound desc = could not find container \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": container with ID starting with 515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691461 4647 scope.go:117] "RemoveContainer" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691619 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} err="failed to get container status \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": rpc error: code = NotFound desc = could not find container \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": container with ID starting with f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691631 4647 scope.go:117] "RemoveContainer" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691787 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} err="failed to get container status \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": rpc error: code = NotFound desc = could not find container \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": container with ID starting with 19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691799 4647 scope.go:117] "RemoveContainer" 
containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.691983 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} err="failed to get container status \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": rpc error: code = NotFound desc = could not find container \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": container with ID starting with 1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692007 4647 scope.go:117] "RemoveContainer" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692266 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} err="failed to get container status \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": rpc error: code = NotFound desc = could not find container \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": container with ID starting with 515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692292 4647 scope.go:117] "RemoveContainer" containerID="f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692529 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217"} err="failed to get container status \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": rpc error: code = NotFound desc = could not find container \"f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217\": container with ID starting with f498dffae3eeff1c6dc5335273becc6f2f3c16601c78aa333f1b344293412217 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692555 4647 scope.go:117] "RemoveContainer" containerID="19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692763 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384"} err="failed to get container status \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": rpc error: code = NotFound desc = could not find container \"19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384\": container with ID starting with 19cdb6dce5e50f9064bc7ad603374b225bae168bccbaeaff84a5f7662c7c7384 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.692783 4647 scope.go:117] "RemoveContainer" containerID="1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.693055 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3"} err="failed to get container status \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": rpc error: code = NotFound desc = could not find 
container \"1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3\": container with ID starting with 1102f5ec060b3c6bcdd28cbfbf4706c9585b36c735b0613b3a03ecebe1daafa3 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.693074 4647 scope.go:117] "RemoveContainer" containerID="515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.693393 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9"} err="failed to get container status \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": rpc error: code = NotFound desc = could not find container \"515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9\": container with ID starting with 515016019eef2964b9e89b18b1a85f2a41487f14cdbcbbbb4cdcd46370a324b9 not found: ID does not exist" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.719124 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.734265 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.763360 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.763773 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-notification-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.763790 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-notification-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.763805 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="sg-core" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.763811 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="sg-core" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.763827 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-central-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.763834 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-central-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: E1128 15:46:32.763844 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="proxy-httpd" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.763850 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="proxy-httpd" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.764022 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="sg-core" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.764038 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-central-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.764055 4647 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="proxy-httpd" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.764068 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" containerName="ceilometer-notification-agent" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.765723 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.768718 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.768954 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.768955 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.815257 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.911914 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4dbm\" (UniqueName: \"kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912179 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912309 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912430 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912563 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912645 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912754 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:32 crc kubenswrapper[4647]: I1128 15:46:32.912847 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014382 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014500 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014525 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014575 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014593 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014619 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4dbm\" (UniqueName: \"kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014653 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.014681 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.015711 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.016148 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.023968 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.024069 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.024215 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.039929 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4dbm\" (UniqueName: \"kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.043048 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.043726 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts\") pod \"ceilometer-0\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.083888 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:33 crc kubenswrapper[4647]: W1128 15:46:33.559064 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41a1a869_93b8_4ef2_9a10_a124c3e23e01.slice/crio-a5ef40c5ce5816fd0b8ee690f0014b6486eca64a24eca403c615766c5cea8fa5 WatchSource:0}: Error finding container a5ef40c5ce5816fd0b8ee690f0014b6486eca64a24eca403c615766c5cea8fa5: Status 404 returned error can't find the container with id a5ef40c5ce5816fd0b8ee690f0014b6486eca64a24eca403c615766c5cea8fa5 Nov 28 15:46:33 crc kubenswrapper[4647]: I1128 15:46:33.566093 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:34 crc kubenswrapper[4647]: I1128 15:46:34.054695 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:34 crc kubenswrapper[4647]: I1128 15:46:34.440975 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9be811f3-c9ec-4cc0-842f-3c831cdae7f0" path="/var/lib/kubelet/pods/9be811f3-c9ec-4cc0-842f-3c831cdae7f0/volumes" Nov 28 15:46:34 crc kubenswrapper[4647]: I1128 15:46:34.443731 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerStarted","Data":"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0"} Nov 28 15:46:34 crc kubenswrapper[4647]: I1128 15:46:34.443777 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerStarted","Data":"a5ef40c5ce5816fd0b8ee690f0014b6486eca64a24eca403c615766c5cea8fa5"} Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.026658 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.026697 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.043906 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.082606 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.239115 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.366020 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44xkm\" (UniqueName: \"kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm\") pod \"b62800be-3750-4388-b919-a8ad1373a9b4\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.367057 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle\") pod \"b62800be-3750-4388-b919-a8ad1373a9b4\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.367463 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs\") pod \"b62800be-3750-4388-b919-a8ad1373a9b4\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.367580 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data\") pod \"b62800be-3750-4388-b919-a8ad1373a9b4\" (UID: \"b62800be-3750-4388-b919-a8ad1373a9b4\") " Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.368928 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs" (OuterVolumeSpecName: "logs") pod "b62800be-3750-4388-b919-a8ad1373a9b4" (UID: "b62800be-3750-4388-b919-a8ad1373a9b4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.388611 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm" (OuterVolumeSpecName: "kube-api-access-44xkm") pod "b62800be-3750-4388-b919-a8ad1373a9b4" (UID: "b62800be-3750-4388-b919-a8ad1373a9b4"). InnerVolumeSpecName "kube-api-access-44xkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.425395 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b62800be-3750-4388-b919-a8ad1373a9b4" (UID: "b62800be-3750-4388-b919-a8ad1373a9b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.433374 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data" (OuterVolumeSpecName: "config-data") pod "b62800be-3750-4388-b919-a8ad1373a9b4" (UID: "b62800be-3750-4388-b919-a8ad1373a9b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.453725 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerStarted","Data":"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd"} Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.467003 4647 generic.go:334] "Generic (PLEG): container finished" podID="b62800be-3750-4388-b919-a8ad1373a9b4" containerID="b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089" exitCode=0 Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.467529 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.467665 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerDied","Data":"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089"} Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.467771 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b62800be-3750-4388-b919-a8ad1373a9b4","Type":"ContainerDied","Data":"41674f62d16d13961f960054b6d391684b5e42160abb2dc5773d5c80953203db"} Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.467859 4647 scope.go:117] "RemoveContainer" containerID="b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.469584 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.469615 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b62800be-3750-4388-b919-a8ad1373a9b4-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.469625 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b62800be-3750-4388-b919-a8ad1373a9b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.469634 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44xkm\" (UniqueName: \"kubernetes.io/projected/b62800be-3750-4388-b919-a8ad1373a9b4-kube-api-access-44xkm\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.519662 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.523675 4647 scope.go:117] "RemoveContainer" containerID="23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.529070 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.546613 4647 scope.go:117] "RemoveContainer" containerID="b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.550767 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:46:35 crc kubenswrapper[4647]: E1128 15:46:35.554351 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089\": container with ID starting with b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089 not found: ID does not exist" containerID="b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.554399 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089"} err="failed to get container status \"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089\": rpc error: code = NotFound desc = could not find container \"b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089\": container with ID starting with b7904796cf232c67972760e6d054b5ac67bdb1ffe531f83a7d72109c32f3d089 not found: ID does not exist" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.554439 4647 scope.go:117] "RemoveContainer" containerID="23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4" Nov 28 15:46:35 crc kubenswrapper[4647]: E1128 15:46:35.554699 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4\": container with ID starting with 23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4 not found: ID does not exist" containerID="23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.554719 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4"} err="failed to get container status \"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4\": rpc error: code = NotFound desc = could not find container \"23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4\": container with ID starting with 23ec6d82d49768dbe8f80961d6bf424879b1e63936851c39feddfd2fbe7ae3e4 not found: ID does not exist" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.575544 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:35 crc kubenswrapper[4647]: E1128 15:46:35.576034 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-log" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.576052 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-log" Nov 28 15:46:35 crc kubenswrapper[4647]: E1128 15:46:35.576073 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-api" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.576079 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-api" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.576299 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-log" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.576326 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" containerName="nova-api-api" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 
15:46:35.577667 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.586388 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.586592 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.586706 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.612494 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676004 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676465 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676507 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbvzl\" (UniqueName: \"kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676537 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676593 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.676622 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779560 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779657 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779691 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbvzl\" (UniqueName: \"kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779723 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779775 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.779796 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.781852 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.784214 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.785870 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.789036 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.790608 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.805864 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-rn7j8"] Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.807354 4647 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.809845 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.811956 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbvzl\" (UniqueName: \"kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl\") pod \"nova-api-0\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.812511 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.844581 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rn7j8"] Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.925338 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.986024 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87bvz\" (UniqueName: \"kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.986073 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.986110 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:35 crc kubenswrapper[4647]: I1128 15:46:35.986149 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.088122 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87bvz\" (UniqueName: \"kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.088392 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc 
kubenswrapper[4647]: I1128 15:46:36.088436 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.088475 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.093373 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.095481 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.101466 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.108700 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.109099 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.126565 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87bvz\" (UniqueName: \"kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz\") pod \"nova-cell1-cell-mapping-rn7j8\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.219912 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.461965 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b62800be-3750-4388-b919-a8ad1373a9b4" path="/var/lib/kubelet/pods/b62800be-3750-4388-b919-a8ad1373a9b4/volumes" Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.520803 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerStarted","Data":"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58"} Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.547728 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:36 crc kubenswrapper[4647]: I1128 15:46:36.651221 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rn7j8"] Nov 28 15:46:37 crc kubenswrapper[4647]: I1128 15:46:37.530942 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerStarted","Data":"e0dcab250ac246a3ab87ea8ee0088a98e03c53dbe78581a030c8e0d385536c37"} Nov 28 15:46:37 crc kubenswrapper[4647]: I1128 15:46:37.531507 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerStarted","Data":"b42f33841578edd05d39619112f024d17be977f735ff6c43737ab37e8c25948b"} Nov 28 15:46:37 crc kubenswrapper[4647]: I1128 15:46:37.535761 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rn7j8" event={"ID":"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a","Type":"ContainerStarted","Data":"f2fa85d4ce8112b42f15c8b82ea2fb49518875673410fc61a22c12326647d965"} Nov 28 15:46:37 crc kubenswrapper[4647]: I1128 15:46:37.536287 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rn7j8" event={"ID":"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a","Type":"ContainerStarted","Data":"457dc60a73b24e6583549e34adc14d2cd55722ec0073dbefa2cfde397db465a9"} Nov 28 15:46:37 crc kubenswrapper[4647]: I1128 15:46:37.559873 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-rn7j8" podStartSLOduration=2.559857526 podStartE2EDuration="2.559857526s" podCreationTimestamp="2025-11-28 15:46:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:37.558339666 +0000 UTC m=+1327.405946087" watchObservedRunningTime="2025-11-28 15:46:37.559857526 +0000 UTC m=+1327.407463947" Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.547282 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerStarted","Data":"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65"} Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.548774 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.547495 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="proxy-httpd" containerID="cri-o://579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65" gracePeriod=30 Nov 28 15:46:38 crc 
kubenswrapper[4647]: I1128 15:46:38.547510 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-notification-agent" containerID="cri-o://bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd" gracePeriod=30 Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.547517 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="sg-core" containerID="cri-o://66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58" gracePeriod=30 Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.547441 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-central-agent" containerID="cri-o://227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0" gracePeriod=30 Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.560339 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerStarted","Data":"b4f8012c25d170ece7c7ee6a76eb59a816710cd037430329f6857f3225263e3f"} Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.575976 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.578125711 podStartE2EDuration="6.575875697s" podCreationTimestamp="2025-11-28 15:46:32 +0000 UTC" firstStartedPulling="2025-11-28 15:46:33.563209189 +0000 UTC m=+1323.410815610" lastFinishedPulling="2025-11-28 15:46:37.560959175 +0000 UTC m=+1327.408565596" observedRunningTime="2025-11-28 15:46:38.568776819 +0000 UTC m=+1328.416383240" watchObservedRunningTime="2025-11-28 15:46:38.575875697 +0000 UTC m=+1328.423482118" Nov 28 15:46:38 crc kubenswrapper[4647]: I1128 15:46:38.600009 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.599992317 podStartE2EDuration="3.599992317s" podCreationTimestamp="2025-11-28 15:46:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:38.591389309 +0000 UTC m=+1328.438995720" watchObservedRunningTime="2025-11-28 15:46:38.599992317 +0000 UTC m=+1328.447598738" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.071561 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.142523 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.142766 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="dnsmasq-dns" containerID="cri-o://4a1eb9a9a8b3713123364bc0d156340a60f06c57fd216610e314c78ca31c98b4" gracePeriod=10 Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.579583 4647 generic.go:334] "Generic (PLEG): container finished" podID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerID="4a1eb9a9a8b3713123364bc0d156340a60f06c57fd216610e314c78ca31c98b4" exitCode=0 Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.579652 4647 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" event={"ID":"7b1c9cff-79b5-4c05-b180-95573c9ab889","Type":"ContainerDied","Data":"4a1eb9a9a8b3713123364bc0d156340a60f06c57fd216610e314c78ca31c98b4"} Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.586178 4647 generic.go:334] "Generic (PLEG): container finished" podID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerID="579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65" exitCode=0 Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.586211 4647 generic.go:334] "Generic (PLEG): container finished" podID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerID="66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58" exitCode=2 Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.586221 4647 generic.go:334] "Generic (PLEG): container finished" podID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerID="bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd" exitCode=0 Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.587196 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerDied","Data":"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65"} Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.587229 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerDied","Data":"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58"} Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.587243 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerDied","Data":"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd"} Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.683254 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793102 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793544 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793617 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793729 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793890 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54r86\" (UniqueName: \"kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.793954 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0\") pod \"7b1c9cff-79b5-4c05-b180-95573c9ab889\" (UID: \"7b1c9cff-79b5-4c05-b180-95573c9ab889\") " Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.810517 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86" (OuterVolumeSpecName: "kube-api-access-54r86") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "kube-api-access-54r86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.862257 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.866314 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.871354 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.884981 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config" (OuterVolumeSpecName: "config") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.896917 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.896952 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54r86\" (UniqueName: \"kubernetes.io/projected/7b1c9cff-79b5-4c05-b180-95573c9ab889-kube-api-access-54r86\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.896970 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.896984 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.896994 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.902775 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7b1c9cff-79b5-4c05-b180-95573c9ab889" (UID: "7b1c9cff-79b5-4c05-b180-95573c9ab889"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:46:39 crc kubenswrapper[4647]: I1128 15:46:39.998712 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b1c9cff-79b5-4c05-b180-95573c9ab889-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.598927 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" event={"ID":"7b1c9cff-79b5-4c05-b180-95573c9ab889","Type":"ContainerDied","Data":"928e895f0ed229fdbd0515e5008fb556d0945b2dd663785f832f44df1ead3051"} Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.598990 4647 scope.go:117] "RemoveContainer" containerID="4a1eb9a9a8b3713123364bc0d156340a60f06c57fd216610e314c78ca31c98b4" Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.599158 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-nnd25" Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.632078 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.642857 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-nnd25"] Nov 28 15:46:40 crc kubenswrapper[4647]: I1128 15:46:40.651049 4647 scope.go:117] "RemoveContainer" containerID="926a2795eff3e6f6073f0cea90ad0bed06e58ea39d463fd3e2199416230b52e4" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.167100 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270656 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270716 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270759 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270783 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4dbm\" (UniqueName: \"kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270830 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270858 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270917 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.270942 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs\") pod \"41a1a869-93b8-4ef2-9a10-a124c3e23e01\" (UID: 
\"41a1a869-93b8-4ef2-9a10-a124c3e23e01\") " Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.273112 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.273358 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.277034 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts" (OuterVolumeSpecName: "scripts") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.278008 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm" (OuterVolumeSpecName: "kube-api-access-m4dbm") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "kube-api-access-m4dbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.322905 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.339069 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.365479 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373385 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373543 4647 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373627 4647 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373712 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4dbm\" (UniqueName: \"kubernetes.io/projected/41a1a869-93b8-4ef2-9a10-a124c3e23e01-kube-api-access-m4dbm\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373797 4647 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/41a1a869-93b8-4ef2-9a10-a124c3e23e01-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373879 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.373955 4647 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.403842 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data" (OuterVolumeSpecName: "config-data") pod "41a1a869-93b8-4ef2-9a10-a124c3e23e01" (UID: "41a1a869-93b8-4ef2-9a10-a124c3e23e01"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.406490 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" path="/var/lib/kubelet/pods/7b1c9cff-79b5-4c05-b180-95573c9ab889/volumes" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.477711 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a1a869-93b8-4ef2-9a10-a124c3e23e01-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.625952 4647 generic.go:334] "Generic (PLEG): container finished" podID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerID="227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0" exitCode=0 Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.626003 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerDied","Data":"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0"} Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.626046 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"41a1a869-93b8-4ef2-9a10-a124c3e23e01","Type":"ContainerDied","Data":"a5ef40c5ce5816fd0b8ee690f0014b6486eca64a24eca403c615766c5cea8fa5"} Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.626067 4647 scope.go:117] "RemoveContainer" containerID="579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.627739 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.652473 4647 scope.go:117] "RemoveContainer" containerID="66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.679538 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.688551 4647 scope.go:117] "RemoveContainer" containerID="bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.700952 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720367 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720825 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="dnsmasq-dns" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720844 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="dnsmasq-dns" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720880 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="proxy-httpd" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720889 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="proxy-httpd" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720898 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" 
containerName="ceilometer-notification-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720905 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-notification-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720915 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-central-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720923 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-central-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720932 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="init" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720939 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="init" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.720956 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="sg-core" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.720962 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="sg-core" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.721130 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="proxy-httpd" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.721156 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-notification-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.721170 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1c9cff-79b5-4c05-b180-95573c9ab889" containerName="dnsmasq-dns" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.721178 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="sg-core" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.721195 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" containerName="ceilometer-central-agent" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.722827 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.726879 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.727082 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.727195 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.738866 4647 scope.go:117] "RemoveContainer" containerID="227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.752293 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789539 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789577 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789648 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-config-data\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789695 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-run-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789718 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-scripts\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.789756 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnrzg\" (UniqueName: \"kubernetes.io/projected/aa9888f9-1fbf-439a-978c-77b304679edf-kube-api-access-wnrzg\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: 
I1128 15:46:42.789787 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-log-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.805072 4647 scope.go:117] "RemoveContainer" containerID="579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.806520 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65\": container with ID starting with 579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65 not found: ID does not exist" containerID="579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.806559 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65"} err="failed to get container status \"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65\": rpc error: code = NotFound desc = could not find container \"579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65\": container with ID starting with 579c4fc49bd341db4e47985e87a61a0134e10ff28e50839e98fa4a34b8563f65 not found: ID does not exist" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.806591 4647 scope.go:117] "RemoveContainer" containerID="66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.806902 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58\": container with ID starting with 66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58 not found: ID does not exist" containerID="66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.806928 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58"} err="failed to get container status \"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58\": rpc error: code = NotFound desc = could not find container \"66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58\": container with ID starting with 66777a0c8d86d0806f3558e699e1df9c6c41809b400a7a19514421bc85c37d58 not found: ID does not exist" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.806939 4647 scope.go:117] "RemoveContainer" containerID="bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.807132 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd\": container with ID starting with bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd not found: ID does not exist" containerID="bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.807163 4647 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd"} err="failed to get container status \"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd\": rpc error: code = NotFound desc = could not find container \"bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd\": container with ID starting with bb3955dfbc9c0faf1a61a03e659d3579a54b542d9b5977d6c34f2044cba474bd not found: ID does not exist" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.807179 4647 scope.go:117] "RemoveContainer" containerID="227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0" Nov 28 15:46:42 crc kubenswrapper[4647]: E1128 15:46:42.807364 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0\": container with ID starting with 227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0 not found: ID does not exist" containerID="227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.807379 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0"} err="failed to get container status \"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0\": rpc error: code = NotFound desc = could not find container \"227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0\": container with ID starting with 227785b346ef36112331e2537f0f4ccbc8c38073ff886e06b4714c0cd56237f0 not found: ID does not exist" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891613 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-run-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891663 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-scripts\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891714 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnrzg\" (UniqueName: \"kubernetes.io/projected/aa9888f9-1fbf-439a-978c-77b304679edf-kube-api-access-wnrzg\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891755 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-log-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891788 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 
15:46:42.891806 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891871 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.891903 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-config-data\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.892556 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-run-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.892822 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa9888f9-1fbf-439a-978c-77b304679edf-log-httpd\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.897323 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.900997 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.909381 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-config-data\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.909808 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-scripts\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.910241 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa9888f9-1fbf-439a-978c-77b304679edf-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:42 crc kubenswrapper[4647]: I1128 15:46:42.924048 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wnrzg\" (UniqueName: \"kubernetes.io/projected/aa9888f9-1fbf-439a-978c-77b304679edf-kube-api-access-wnrzg\") pod \"ceilometer-0\" (UID: \"aa9888f9-1fbf-439a-978c-77b304679edf\") " pod="openstack/ceilometer-0" Nov 28 15:46:43 crc kubenswrapper[4647]: I1128 15:46:43.043308 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:43.600211 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:43.647904 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa9888f9-1fbf-439a-978c-77b304679edf","Type":"ContainerStarted","Data":"cb536d829dfa24b79187b6e9b2f6cf82d72db20f34112f1e7a97a8a31b9883bc"} Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:43.653484 4647 generic.go:334] "Generic (PLEG): container finished" podID="6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" containerID="f2fa85d4ce8112b42f15c8b82ea2fb49518875673410fc61a22c12326647d965" exitCode=0 Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:43.653547 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rn7j8" event={"ID":"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a","Type":"ContainerDied","Data":"f2fa85d4ce8112b42f15c8b82ea2fb49518875673410fc61a22c12326647d965"} Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:44.406653 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41a1a869-93b8-4ef2-9a10-a124c3e23e01" path="/var/lib/kubelet/pods/41a1a869-93b8-4ef2-9a10-a124c3e23e01/volumes" Nov 28 15:46:44 crc kubenswrapper[4647]: I1128 15:46:44.664314 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa9888f9-1fbf-439a-978c-77b304679edf","Type":"ContainerStarted","Data":"b20f80e925cea9d3690b7ea5908ba4792d841898b5609f7ea57b9cf3a0e69cbc"} Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.027062 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.034447 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.034550 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.042381 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.061752 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.138089 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87bvz\" (UniqueName: \"kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz\") pod \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.138190 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts\") pod \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.138240 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle\") pod \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.138500 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data\") pod \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\" (UID: \"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a\") " Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.158052 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz" (OuterVolumeSpecName: "kube-api-access-87bvz") pod "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" (UID: "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a"). InnerVolumeSpecName "kube-api-access-87bvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.173953 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts" (OuterVolumeSpecName: "scripts") pod "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" (UID: "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.207280 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data" (OuterVolumeSpecName: "config-data") pod "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" (UID: "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.209899 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" (UID: "6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.243391 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.243426 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87bvz\" (UniqueName: \"kubernetes.io/projected/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-kube-api-access-87bvz\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.243437 4647 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.243447 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.677888 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa9888f9-1fbf-439a-978c-77b304679edf","Type":"ContainerStarted","Data":"921590d64bc483d58fc72e54d70a30cb5f021a3cabf0c6c5836d38327eef2c08"} Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.680668 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rn7j8" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.688816 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rn7j8" event={"ID":"6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a","Type":"ContainerDied","Data":"457dc60a73b24e6583549e34adc14d2cd55722ec0073dbefa2cfde397db465a9"} Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.688958 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="457dc60a73b24e6583549e34adc14d2cd55722ec0073dbefa2cfde397db465a9" Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.908841 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.909066 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-log" containerID="cri-o://e0dcab250ac246a3ab87ea8ee0088a98e03c53dbe78581a030c8e0d385536c37" gracePeriod=30 Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.909505 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-api" containerID="cri-o://b4f8012c25d170ece7c7ee6a76eb59a816710cd037430329f6857f3225263e3f" gracePeriod=30 Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.944834 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:45 crc kubenswrapper[4647]: I1128 15:46:45.945062 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerName="nova-scheduler-scheduler" containerID="cri-o://01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" gracePeriod=30 Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.102373 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.695860 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa9888f9-1fbf-439a-978c-77b304679edf","Type":"ContainerStarted","Data":"a6eb3a0c45467a37aec7c769e741f02c59e1ac01e42c5d3f0363ada812620b41"} Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.700498 4647 generic.go:334] "Generic (PLEG): container finished" podID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerID="b4f8012c25d170ece7c7ee6a76eb59a816710cd037430329f6857f3225263e3f" exitCode=0 Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.700669 4647 generic.go:334] "Generic (PLEG): container finished" podID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerID="e0dcab250ac246a3ab87ea8ee0088a98e03c53dbe78581a030c8e0d385536c37" exitCode=143 Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.700739 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerDied","Data":"b4f8012c25d170ece7c7ee6a76eb59a816710cd037430329f6857f3225263e3f"} Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.700805 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerDied","Data":"e0dcab250ac246a3ab87ea8ee0088a98e03c53dbe78581a030c8e0d385536c37"} Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 
15:46:46.822728 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.975962 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbvzl\" (UniqueName: \"kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.976095 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.976116 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.976140 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.976202 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.976262 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data\") pod \"bdf5529a-83da-4e7d-99ff-863aa224e295\" (UID: \"bdf5529a-83da-4e7d-99ff-863aa224e295\") " Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.977662 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs" (OuterVolumeSpecName: "logs") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:46 crc kubenswrapper[4647]: I1128 15:46:46.998716 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl" (OuterVolumeSpecName: "kube-api-access-tbvzl") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "kube-api-access-tbvzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.046843 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.062243 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data" (OuterVolumeSpecName: "config-data") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.077966 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbvzl\" (UniqueName: \"kubernetes.io/projected/bdf5529a-83da-4e7d-99ff-863aa224e295-kube-api-access-tbvzl\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.077994 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bdf5529a-83da-4e7d-99ff-863aa224e295-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.078006 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.078015 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.092523 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.097934 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bdf5529a-83da-4e7d-99ff-863aa224e295" (UID: "bdf5529a-83da-4e7d-99ff-863aa224e295"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.179604 4647 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.179635 4647 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdf5529a-83da-4e7d-99ff-863aa224e295-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.710675 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" containerID="cri-o://9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885" gracePeriod=30 Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.710933 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.711588 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bdf5529a-83da-4e7d-99ff-863aa224e295","Type":"ContainerDied","Data":"b42f33841578edd05d39619112f024d17be977f735ff6c43737ab37e8c25948b"} Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.711677 4647 scope.go:117] "RemoveContainer" containerID="b4f8012c25d170ece7c7ee6a76eb59a816710cd037430329f6857f3225263e3f" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.712426 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" containerID="cri-o://a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b" gracePeriod=30 Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.736375 4647 scope.go:117] "RemoveContainer" containerID="e0dcab250ac246a3ab87ea8ee0088a98e03c53dbe78581a030c8e0d385536c37" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.763503 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.771305 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.781949 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:47 crc kubenswrapper[4647]: E1128 15:46:47.782344 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-log" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782360 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-log" Nov 28 15:46:47 crc kubenswrapper[4647]: E1128 15:46:47.782374 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" containerName="nova-manage" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782380 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" containerName="nova-manage" Nov 28 15:46:47 crc kubenswrapper[4647]: E1128 15:46:47.782401 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-api" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782422 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-api" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782628 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-log" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782646 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" containerName="nova-manage" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.782668 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" containerName="nova-api-api" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.783634 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.786050 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.786221 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.786346 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.802781 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.891796 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvbl8\" (UniqueName: \"kubernetes.io/projected/4d763768-9218-4866-94d2-1197f5e81fce-kube-api-access-hvbl8\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.892405 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d763768-9218-4866-94d2-1197f5e81fce-logs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.892468 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.892672 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-config-data\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.892764 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-public-tls-certs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.892841 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994424 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d763768-9218-4866-94d2-1197f5e81fce-logs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994753 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994800 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-config-data\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994827 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-public-tls-certs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994863 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.994924 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvbl8\" (UniqueName: \"kubernetes.io/projected/4d763768-9218-4866-94d2-1197f5e81fce-kube-api-access-hvbl8\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:47 crc kubenswrapper[4647]: I1128 15:46:47.995023 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d763768-9218-4866-94d2-1197f5e81fce-logs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.000590 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.001171 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-config-data\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.002837 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.018515 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d763768-9218-4866-94d2-1197f5e81fce-public-tls-certs\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.023800 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvbl8\" (UniqueName: \"kubernetes.io/projected/4d763768-9218-4866-94d2-1197f5e81fce-kube-api-access-hvbl8\") pod \"nova-api-0\" (UID: \"4d763768-9218-4866-94d2-1197f5e81fce\") " 
pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.103267 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.411522 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdf5529a-83da-4e7d-99ff-863aa224e295" path="/var/lib/kubelet/pods/bdf5529a-83da-4e7d-99ff-863aa224e295/volumes" Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.433373 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 is running failed: container process not found" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.433821 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 is running failed: container process not found" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.434199 4647 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 is running failed: container process not found" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.434222 4647 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerName="nova-scheduler-scheduler" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.602945 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:48 crc kubenswrapper[4647]: W1128 15:46:48.672947 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d763768_9218_4866_94d2_1197f5e81fce.slice/crio-f81c75ddc26020a8e67f2b4e23aa89e2b960a3631ea1b85d7d78ae2ee04d3d96 WatchSource:0}: Error finding container f81c75ddc26020a8e67f2b4e23aa89e2b960a3631ea1b85d7d78ae2ee04d3d96: Status 404 returned error can't find the container with id f81c75ddc26020a8e67f2b4e23aa89e2b960a3631ea1b85d7d78ae2ee04d3d96 Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.676679 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.709868 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle\") pod \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.709991 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data\") pod \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.710107 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d92ck\" (UniqueName: \"kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck\") pod \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\" (UID: \"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3\") " Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.716696 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck" (OuterVolumeSpecName: "kube-api-access-d92ck") pod "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" (UID: "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3"). InnerVolumeSpecName "kube-api-access-d92ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.741547 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" (UID: "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.742153 4647 generic.go:334] "Generic (PLEG): container finished" podID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerID="9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885" exitCode=143 Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.742209 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerDied","Data":"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885"} Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.751694 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data" (OuterVolumeSpecName: "config-data") pod "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" (UID: "9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.756620 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4d763768-9218-4866-94d2-1197f5e81fce","Type":"ContainerStarted","Data":"f81c75ddc26020a8e67f2b4e23aa89e2b960a3631ea1b85d7d78ae2ee04d3d96"} Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.763178 4647 generic.go:334] "Generic (PLEG): container finished" podID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" exitCode=0 Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.763245 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3","Type":"ContainerDied","Data":"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0"} Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.763272 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3","Type":"ContainerDied","Data":"f8b921c9c9e34e22c0ed90eec1516b76e976bafb36d053695a0cf639a11908a2"} Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.763290 4647 scope.go:117] "RemoveContainer" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.763371 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.777988 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa9888f9-1fbf-439a-978c-77b304679edf","Type":"ContainerStarted","Data":"372f65253d2e6ee9d3e2f0d47d67c8677939a069aa6e7c2437a15f954ba7c797"} Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.778140 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.797920 4647 scope.go:117] "RemoveContainer" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.800591 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0\": container with ID starting with 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 not found: ID does not exist" containerID="01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.800634 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0"} err="failed to get container status \"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0\": rpc error: code = NotFound desc = could not find container \"01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0\": container with ID starting with 01efce16c6831d483f205e10b1aa4abd2e2bed01a6d8f3f7fe64f44f098681b0 not found: ID does not exist" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.810492 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.812089 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d92ck\" (UniqueName: \"kubernetes.io/projected/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-kube-api-access-d92ck\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.812120 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.812132 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.826042 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.872294 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.872647 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.659086594 podStartE2EDuration="6.872624295s" podCreationTimestamp="2025-11-28 15:46:42 +0000 UTC" firstStartedPulling="2025-11-28 15:46:43.585230078 +0000 UTC m=+1333.432836489" lastFinishedPulling="2025-11-28 15:46:47.798767759 +0000 UTC m=+1337.646374190" observedRunningTime="2025-11-28 15:46:48.822439324 +0000 UTC m=+1338.670045735" 
watchObservedRunningTime="2025-11-28 15:46:48.872624295 +0000 UTC m=+1338.720230716" Nov 28 15:46:48 crc kubenswrapper[4647]: E1128 15:46:48.873684 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerName="nova-scheduler-scheduler" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.873704 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerName="nova-scheduler-scheduler" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.874263 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" containerName="nova-scheduler-scheduler" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.875460 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.878244 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:46:48 crc kubenswrapper[4647]: I1128 15:46:48.948322 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.024583 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p5r4\" (UniqueName: \"kubernetes.io/projected/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-kube-api-access-5p5r4\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.024690 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.024722 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-config-data\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.126142 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.126198 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-config-data\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.126297 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p5r4\" (UniqueName: \"kubernetes.io/projected/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-kube-api-access-5p5r4\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.129757 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-config-data\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.129851 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.145241 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p5r4\" (UniqueName: \"kubernetes.io/projected/7597bf4e-3cd6-4adb-8723-8a86aaf60a05-kube-api-access-5p5r4\") pod \"nova-scheduler-0\" (UID: \"7597bf4e-3cd6-4adb-8723-8a86aaf60a05\") " pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.220346 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.790434 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4d763768-9218-4866-94d2-1197f5e81fce","Type":"ContainerStarted","Data":"c6936ed1bd40bbc4c341c06d483635dbb1a5a32265ba202b0c15867f55bdcaa7"} Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.790736 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4d763768-9218-4866-94d2-1197f5e81fce","Type":"ContainerStarted","Data":"9dd270fe7a1666c97825d3e693b7535dad804336e0e9a19b4a537d2173a3befe"} Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.815912 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.815896066 podStartE2EDuration="2.815896066s" podCreationTimestamp="2025-11-28 15:46:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:49.807214506 +0000 UTC m=+1339.654820927" watchObservedRunningTime="2025-11-28 15:46:49.815896066 +0000 UTC m=+1339.663502477" Nov 28 15:46:49 crc kubenswrapper[4647]: I1128 15:46:49.831922 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:46:49 crc kubenswrapper[4647]: W1128 15:46:49.836296 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7597bf4e_3cd6_4adb_8723_8a86aaf60a05.slice/crio-c7e515fca844bd2891f5bc36f93c3d3e08e12197c6b489e36a4e235d2ea979ed WatchSource:0}: Error finding container c7e515fca844bd2891f5bc36f93c3d3e08e12197c6b489e36a4e235d2ea979ed: Status 404 returned error can't find the container with id c7e515fca844bd2891f5bc36f93c3d3e08e12197c6b489e36a4e235d2ea979ed Nov 28 15:46:50 crc kubenswrapper[4647]: I1128 15:46:50.408283 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3" path="/var/lib/kubelet/pods/9aef985d-1a4e-4f42-8a5c-9b615d0e7cc3/volumes" Nov 28 15:46:50 crc kubenswrapper[4647]: I1128 15:46:50.803460 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"7597bf4e-3cd6-4adb-8723-8a86aaf60a05","Type":"ContainerStarted","Data":"6d04c0a5acaad835e8cfeba0b4423cf30a9b818ad70da321418c014f66439580"} Nov 28 15:46:50 crc kubenswrapper[4647]: I1128 15:46:50.803836 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7597bf4e-3cd6-4adb-8723-8a86aaf60a05","Type":"ContainerStarted","Data":"c7e515fca844bd2891f5bc36f93c3d3e08e12197c6b489e36a4e235d2ea979ed"} Nov 28 15:46:50 crc kubenswrapper[4647]: I1128 15:46:50.821276 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.821254705 podStartE2EDuration="2.821254705s" podCreationTimestamp="2025-11-28 15:46:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:50.814916577 +0000 UTC m=+1340.662522998" watchObservedRunningTime="2025-11-28 15:46:50.821254705 +0000 UTC m=+1340.668861126" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.145983 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": read tcp 10.217.0.2:38836->10.217.0.195:8775: read: connection reset by peer" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.146038 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": read tcp 10.217.0.2:38826->10.217.0.195:8775: read: connection reset by peer" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.662896 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.804283 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data\") pod \"21030524-3ea9-4d2c-b620-82c5f58d66bf\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.804344 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bk2m7\" (UniqueName: \"kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7\") pod \"21030524-3ea9-4d2c-b620-82c5f58d66bf\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.804384 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs\") pod \"21030524-3ea9-4d2c-b620-82c5f58d66bf\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.804406 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs\") pod \"21030524-3ea9-4d2c-b620-82c5f58d66bf\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.804583 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle\") pod \"21030524-3ea9-4d2c-b620-82c5f58d66bf\" (UID: \"21030524-3ea9-4d2c-b620-82c5f58d66bf\") " Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.807736 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs" (OuterVolumeSpecName: "logs") pod "21030524-3ea9-4d2c-b620-82c5f58d66bf" (UID: "21030524-3ea9-4d2c-b620-82c5f58d66bf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.813726 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7" (OuterVolumeSpecName: "kube-api-access-bk2m7") pod "21030524-3ea9-4d2c-b620-82c5f58d66bf" (UID: "21030524-3ea9-4d2c-b620-82c5f58d66bf"). InnerVolumeSpecName "kube-api-access-bk2m7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.819359 4647 generic.go:334] "Generic (PLEG): container finished" podID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerID="a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b" exitCode=0 Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.820283 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.820751 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerDied","Data":"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b"} Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.820776 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"21030524-3ea9-4d2c-b620-82c5f58d66bf","Type":"ContainerDied","Data":"c433ab2e8e35654e9a64460dc8879e03aaf1aafcfb982153c1b19f0e88f39bf2"} Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.820791 4647 scope.go:117] "RemoveContainer" containerID="a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.864666 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21030524-3ea9-4d2c-b620-82c5f58d66bf" (UID: "21030524-3ea9-4d2c-b620-82c5f58d66bf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.867808 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data" (OuterVolumeSpecName: "config-data") pod "21030524-3ea9-4d2c-b620-82c5f58d66bf" (UID: "21030524-3ea9-4d2c-b620-82c5f58d66bf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.908647 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.908838 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.908902 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bk2m7\" (UniqueName: \"kubernetes.io/projected/21030524-3ea9-4d2c-b620-82c5f58d66bf-kube-api-access-bk2m7\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.910453 4647 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21030524-3ea9-4d2c-b620-82c5f58d66bf-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.917707 4647 scope.go:117] "RemoveContainer" containerID="9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.922309 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "21030524-3ea9-4d2c-b620-82c5f58d66bf" (UID: "21030524-3ea9-4d2c-b620-82c5f58d66bf"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.938648 4647 scope.go:117] "RemoveContainer" containerID="a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b" Nov 28 15:46:51 crc kubenswrapper[4647]: E1128 15:46:51.939024 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b\": container with ID starting with a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b not found: ID does not exist" containerID="a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.939060 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b"} err="failed to get container status \"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b\": rpc error: code = NotFound desc = could not find container \"a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b\": container with ID starting with a648543d0200633331ae0c920de85d42b41fda0c494e9c402a0b2a386e03b45b not found: ID does not exist" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.939081 4647 scope.go:117] "RemoveContainer" containerID="9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885" Nov 28 15:46:51 crc kubenswrapper[4647]: E1128 15:46:51.939572 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885\": container with ID starting with 9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885 not found: ID does not exist" containerID="9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885" Nov 28 15:46:51 crc kubenswrapper[4647]: I1128 15:46:51.939599 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885"} err="failed to get container status \"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885\": rpc error: code = NotFound desc = could not find container \"9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885\": container with ID starting with 9e72785027544956bf51a7756db044293cb46e49958f63cbeadd97d4411ac885 not found: ID does not exist" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.012170 4647 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/21030524-3ea9-4d2c-b620-82c5f58d66bf-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.154802 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.178796 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.261587 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:52 crc kubenswrapper[4647]: E1128 15:46:52.262988 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.263014 4647 
state_mem.go:107] "Deleted CPUSet assignment" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" Nov 28 15:46:52 crc kubenswrapper[4647]: E1128 15:46:52.263060 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.263069 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.263575 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-metadata" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.263623 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" containerName="nova-metadata-log" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.270887 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.276828 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.277025 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.301523 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.404354 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21030524-3ea9-4d2c-b620-82c5f58d66bf" path="/var/lib/kubelet/pods/21030524-3ea9-4d2c-b620-82c5f58d66bf/volumes" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.423771 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.423833 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-logs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.423876 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.423911 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75c7s\" (UniqueName: \"kubernetes.io/projected/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-kube-api-access-75c7s\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.423984 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-config-data\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.527070 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.527118 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-logs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.527176 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.527215 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75c7s\" (UniqueName: \"kubernetes.io/projected/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-kube-api-access-75c7s\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.527315 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-config-data\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.528762 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-logs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.533209 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-config-data\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.537878 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.542538 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.546240 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75c7s\" (UniqueName: \"kubernetes.io/projected/fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f-kube-api-access-75c7s\") pod \"nova-metadata-0\" (UID: \"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f\") " pod="openstack/nova-metadata-0" Nov 28 15:46:52 crc kubenswrapper[4647]: I1128 15:46:52.606895 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:46:53 crc kubenswrapper[4647]: I1128 15:46:53.166608 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:46:53 crc kubenswrapper[4647]: W1128 15:46:53.171941 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe41f81d_e4cf_4c08_a00f_c1f41eb6d04f.slice/crio-d54f9796da5bbc60c23a611a2fd36940e292229f3afb7ddf2134c2400644d6bb WatchSource:0}: Error finding container d54f9796da5bbc60c23a611a2fd36940e292229f3afb7ddf2134c2400644d6bb: Status 404 returned error can't find the container with id d54f9796da5bbc60c23a611a2fd36940e292229f3afb7ddf2134c2400644d6bb Nov 28 15:46:53 crc kubenswrapper[4647]: I1128 15:46:53.844472 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f","Type":"ContainerStarted","Data":"63ef8cf220a623f9d8d9852d64fc024000eb4f16ddcecc3f916da7d070d5ca7c"} Nov 28 15:46:53 crc kubenswrapper[4647]: I1128 15:46:53.844878 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f","Type":"ContainerStarted","Data":"fda8c1a7fdeda794c09fcf6ea22a1fbe0ba5307920bd56263ba21fcf9931f597"} Nov 28 15:46:53 crc kubenswrapper[4647]: I1128 15:46:53.844900 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f","Type":"ContainerStarted","Data":"d54f9796da5bbc60c23a611a2fd36940e292229f3afb7ddf2134c2400644d6bb"} Nov 28 15:46:53 crc kubenswrapper[4647]: I1128 15:46:53.877371 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.877342903 podStartE2EDuration="1.877342903s" podCreationTimestamp="2025-11-28 15:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:46:53.867394029 +0000 UTC m=+1343.715000460" watchObservedRunningTime="2025-11-28 15:46:53.877342903 +0000 UTC m=+1343.724949324" Nov 28 15:46:54 crc kubenswrapper[4647]: I1128 15:46:54.221609 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:46:57 crc kubenswrapper[4647]: I1128 15:46:57.607242 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:46:57 crc kubenswrapper[4647]: I1128 15:46:57.608132 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:46:58 crc kubenswrapper[4647]: I1128 15:46:58.103815 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:46:58 crc kubenswrapper[4647]: I1128 15:46:58.103898 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:46:59 crc kubenswrapper[4647]: I1128 15:46:59.117746 4647 prober.go:107] "Probe failed" 
probeType="Startup" pod="openstack/nova-api-0" podUID="4d763768-9218-4866-94d2-1197f5e81fce" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:59 crc kubenswrapper[4647]: I1128 15:46:59.117741 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4d763768-9218-4866-94d2-1197f5e81fce" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:46:59 crc kubenswrapper[4647]: I1128 15:46:59.222068 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:46:59 crc kubenswrapper[4647]: I1128 15:46:59.260364 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:46:59 crc kubenswrapper[4647]: I1128 15:46:59.966170 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:47:02 crc kubenswrapper[4647]: I1128 15:47:02.608072 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:47:02 crc kubenswrapper[4647]: I1128 15:47:02.608495 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:47:03 crc kubenswrapper[4647]: I1128 15:47:03.626648 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:47:03 crc kubenswrapper[4647]: I1128 15:47:03.626998 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:47:08 crc kubenswrapper[4647]: I1128 15:47:08.126092 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:47:08 crc kubenswrapper[4647]: I1128 15:47:08.127289 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:47:08 crc kubenswrapper[4647]: I1128 15:47:08.127329 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:47:08 crc kubenswrapper[4647]: I1128 15:47:08.142100 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:47:09 crc kubenswrapper[4647]: I1128 15:47:09.015121 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:47:09 crc kubenswrapper[4647]: I1128 15:47:09.023032 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:47:12 crc kubenswrapper[4647]: I1128 15:47:12.615119 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:47:12 crc kubenswrapper[4647]: I1128 15:47:12.615593 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 
15:47:12 crc kubenswrapper[4647]: I1128 15:47:12.621750 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:47:12 crc kubenswrapper[4647]: I1128 15:47:12.624547 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:47:13 crc kubenswrapper[4647]: I1128 15:47:13.056176 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:47:23 crc kubenswrapper[4647]: I1128 15:47:23.194728 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:24 crc kubenswrapper[4647]: I1128 15:47:24.248398 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:47:27 crc kubenswrapper[4647]: I1128 15:47:27.653516 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="rabbitmq" containerID="cri-o://f8fd3dd1f90ff0fbd13ce6172d2787a11e63ac207094ffff5755cd5628d899c9" gracePeriod=604796 Nov 28 15:47:29 crc kubenswrapper[4647]: I1128 15:47:29.392864 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="rabbitmq" containerID="cri-o://7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102" gracePeriod=604795 Nov 28 15:47:29 crc kubenswrapper[4647]: I1128 15:47:29.534277 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.97:5671: connect: connection refused" Nov 28 15:47:29 crc kubenswrapper[4647]: I1128 15:47:29.926520 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 28 15:47:34 crc kubenswrapper[4647]: I1128 15:47:34.303590 4647 generic.go:334] "Generic (PLEG): container finished" podID="49c7e330-cae6-469f-9a44-7087cc112af1" containerID="f8fd3dd1f90ff0fbd13ce6172d2787a11e63ac207094ffff5755cd5628d899c9" exitCode=0 Nov 28 15:47:34 crc kubenswrapper[4647]: I1128 15:47:34.303702 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerDied","Data":"f8fd3dd1f90ff0fbd13ce6172d2787a11e63ac207094ffff5755cd5628d899c9"} Nov 28 15:47:34 crc kubenswrapper[4647]: I1128 15:47:34.917590 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.056908 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.056953 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.056982 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057047 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057086 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057119 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057134 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057182 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057253 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057276 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: 
\"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.057363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jmtq\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq\") pod \"49c7e330-cae6-469f-9a44-7087cc112af1\" (UID: \"49c7e330-cae6-469f-9a44-7087cc112af1\") " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.058375 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.060365 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.060929 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.068505 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info" (OuterVolumeSpecName: "pod-info") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.069856 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.071939 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.072682 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq" (OuterVolumeSpecName: "kube-api-access-6jmtq") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "kube-api-access-6jmtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.084984 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.106893 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data" (OuterVolumeSpecName: "config-data") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159892 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jmtq\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-kube-api-access-6jmtq\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159924 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159948 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159957 4647 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/49c7e330-cae6-469f-9a44-7087cc112af1-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159966 4647 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/49c7e330-cae6-469f-9a44-7087cc112af1-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159975 4647 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159984 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.159992 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.160001 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.200343 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: 
"kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.201404 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf" (OuterVolumeSpecName: "server-conf") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.255842 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "49c7e330-cae6-469f-9a44-7087cc112af1" (UID: "49c7e330-cae6-469f-9a44-7087cc112af1"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.261779 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/49c7e330-cae6-469f-9a44-7087cc112af1-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.261809 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.261822 4647 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/49c7e330-cae6-469f-9a44-7087cc112af1-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.315621 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"49c7e330-cae6-469f-9a44-7087cc112af1","Type":"ContainerDied","Data":"f59bed3300d1f113f96b013f69923c6e42d92008a55573b8a7a6a4d18dd0a1c9"} Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.315671 4647 scope.go:117] "RemoveContainer" containerID="f8fd3dd1f90ff0fbd13ce6172d2787a11e63ac207094ffff5755cd5628d899c9" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.315708 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.352311 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.356020 4647 scope.go:117] "RemoveContainer" containerID="216cb0060d00175d92f57504f5d155efb9cc08933244d34a8618e47e828942fb" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.362192 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.386986 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:35 crc kubenswrapper[4647]: E1128 15:47:35.387349 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="rabbitmq" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.387367 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="rabbitmq" Nov 28 15:47:35 crc kubenswrapper[4647]: E1128 15:47:35.387422 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="setup-container" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.387430 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="setup-container" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.387606 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" containerName="rabbitmq" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.388556 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.391358 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.391552 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-kcbph" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.391579 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.392271 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.392405 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.393142 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.393504 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.429495 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465754 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/50660b1a-39a9-4ada-a275-a068d6b406bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465832 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465856 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465880 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465920 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p86xr\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-kube-api-access-p86xr\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465955 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.465974 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.466002 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/50660b1a-39a9-4ada-a275-a068d6b406bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.466020 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.467335 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.467384 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.568897 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.568970 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569044 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/50660b1a-39a9-4ada-a275-a068d6b406bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569088 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 
15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569119 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569147 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569186 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p86xr\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-kube-api-access-p86xr\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569219 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569247 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569285 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/50660b1a-39a9-4ada-a275-a068d6b406bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569316 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569824 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.569889 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.570106 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod 
\"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.571513 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.572261 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50660b1a-39a9-4ada-a275-a068d6b406bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.572756 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.573480 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.574150 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.575004 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/50660b1a-39a9-4ada-a275-a068d6b406bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.582313 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/50660b1a-39a9-4ada-a275-a068d6b406bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.590851 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p86xr\" (UniqueName: \"kubernetes.io/projected/50660b1a-39a9-4ada-a275-a068d6b406bf-kube-api-access-p86xr\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.600938 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"50660b1a-39a9-4ada-a275-a068d6b406bf\") " pod="openstack/rabbitmq-server-0" Nov 28 15:47:35 crc kubenswrapper[4647]: I1128 15:47:35.738538 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.015709 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.020501 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.026742 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.059704 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082094 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082141 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082175 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082238 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082281 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082343 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qx56\" (UniqueName: \"kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.082362 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " 
pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.118391 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.184773 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185241 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185303 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185342 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6lsr\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185363 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185383 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185449 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185490 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185514 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185536 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 
15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185601 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185663 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie\") pod \"27fe6c77-c0f2-4398-a337-133eaca78fb4\" (UID: \"27fe6c77-c0f2-4398-a337-133eaca78fb4\") " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185890 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185927 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.185960 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.186021 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.186064 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.186117 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qx56\" (UniqueName: \"kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.186135 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.186942 4647 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.189791 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.202362 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.202936 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.203457 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.205498 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.213236 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.215537 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.216965 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.217264 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.223863 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.256160 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.256335 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr" (OuterVolumeSpecName: "kube-api-access-k6lsr") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "kube-api-access-k6lsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.256490 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info" (OuterVolumeSpecName: "pod-info") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.288921 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.288963 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6lsr\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-kube-api-access-k6lsr\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.288988 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.288998 4647 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/27fe6c77-c0f2-4398-a337-133eaca78fb4-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.289011 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.289019 4647 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.289030 4647 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/27fe6c77-c0f2-4398-a337-133eaca78fb4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.289042 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.428910 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qx56\" (UniqueName: \"kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56\") pod \"dnsmasq-dns-5576978c7c-kwxhz\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.482794 4647 generic.go:334] "Generic (PLEG): container finished" podID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerID="7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102" exitCode=0 Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.483188 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.490246 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.520483 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.543119 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c7e330-cae6-469f-9a44-7087cc112af1" path="/var/lib/kubelet/pods/49c7e330-cae6-469f-9a44-7087cc112af1/volumes" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.569647 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerDied","Data":"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102"} Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.570176 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"27fe6c77-c0f2-4398-a337-133eaca78fb4","Type":"ContainerDied","Data":"683ec6eefa54f3ef671e6a174b2b046240f657da8b28c01a1c90bad69f3220c1"} Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.570284 4647 scope.go:117] "RemoveContainer" containerID="7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.573904 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data" (OuterVolumeSpecName: "config-data") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.625665 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.633114 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf" (OuterVolumeSpecName: "server-conf") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.664007 4647 scope.go:117] "RemoveContainer" containerID="c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.684171 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "27fe6c77-c0f2-4398-a337-133eaca78fb4" (UID: "27fe6c77-c0f2-4398-a337-133eaca78fb4"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.701095 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.716608 4647 scope.go:117] "RemoveContainer" containerID="7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102" Nov 28 15:47:36 crc kubenswrapper[4647]: E1128 15:47:36.716981 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102\": container with ID starting with 7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102 not found: ID does not exist" containerID="7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.717026 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102"} err="failed to get container status \"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102\": rpc error: code = NotFound desc = could not find container \"7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102\": container with ID starting with 7e1d69717d7b97ad5934f85f6def52cec49ae53a5782f0d18a812cd7e4178102 not found: ID does not exist" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.717054 4647 scope.go:117] "RemoveContainer" containerID="c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914" Nov 28 15:47:36 crc kubenswrapper[4647]: E1128 15:47:36.717273 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914\": container with ID starting with c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914 not found: ID does not exist" containerID="c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.717293 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914"} err="failed to get container status \"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914\": rpc error: code = NotFound desc = could not find container \"c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914\": container with ID starting with c1836145fbee3b1c1869c5f55ff003ec738600e21f3fa6883787db8ca2b53914 not found: ID does not exist" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.727966 4647 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/27fe6c77-c0f2-4398-a337-133eaca78fb4-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.728003 4647 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/27fe6c77-c0f2-4398-a337-133eaca78fb4-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.828042 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.854688 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.869071 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 
15:47:36 crc kubenswrapper[4647]: E1128 15:47:36.869531 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="setup-container" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.869550 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="setup-container" Nov 28 15:47:36 crc kubenswrapper[4647]: E1128 15:47:36.869599 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="rabbitmq" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.869605 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="rabbitmq" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.869798 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" containerName="rabbitmq" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.870804 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.877241 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.877479 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.883533 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.883867 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.884043 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-r84c9" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.884169 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.884299 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 15:47:36 crc kubenswrapper[4647]: I1128 15:47:36.887601 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.035295 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/271d0057-21bf-4899-9284-d8d2beb015b6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.035653 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.035758 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.035839 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.035940 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.036041 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kpml\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-kube-api-access-7kpml\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.036662 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.036724 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.036803 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/271d0057-21bf-4899-9284-d8d2beb015b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.036922 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.037030 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: E1128 15:47:37.109397 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial 
failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27fe6c77_c0f2_4398_a337_133eaca78fb4.slice/crio-683ec6eefa54f3ef671e6a174b2b046240f657da8b28c01a1c90bad69f3220c1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27fe6c77_c0f2_4398_a337_133eaca78fb4.slice\": RecentStats: unable to find data in memory cache]" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138522 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138583 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138611 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138632 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138671 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kpml\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-kube-api-access-7kpml\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138721 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138738 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138762 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/271d0057-21bf-4899-9284-d8d2beb015b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138797 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138832 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.138867 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/271d0057-21bf-4899-9284-d8d2beb015b6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.140124 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.140298 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.140901 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.141139 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.141454 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.141993 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/271d0057-21bf-4899-9284-d8d2beb015b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.146603 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/271d0057-21bf-4899-9284-d8d2beb015b6-pod-info\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.169699 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.169746 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.182753 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.183439 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/271d0057-21bf-4899-9284-d8d2beb015b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.183774 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kpml\" (UniqueName: \"kubernetes.io/projected/271d0057-21bf-4899-9284-d8d2beb015b6-kube-api-access-7kpml\") pod \"rabbitmq-cell1-server-0\" (UID: \"271d0057-21bf-4899-9284-d8d2beb015b6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.252035 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.252572 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:37 crc kubenswrapper[4647]: W1128 15:47:37.258877 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea69567b_ba5e_49b8_827a_2b6264ec33bd.slice/crio-4baefc5d5026f9eaaa3daaf4d6d1df1d289242056583e58e241c4d72efadf6ae WatchSource:0}: Error finding container 4baefc5d5026f9eaaa3daaf4d6d1df1d289242056583e58e241c4d72efadf6ae: Status 404 returned error can't find the container with id 4baefc5d5026f9eaaa3daaf4d6d1df1d289242056583e58e241c4d72efadf6ae Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.494558 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"50660b1a-39a9-4ada-a275-a068d6b406bf","Type":"ContainerStarted","Data":"e361ec96f6b8c7e048582cf2342b0a46db118f7a83a5a87b11b63f76ad2e9773"} Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.505933 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" event={"ID":"ea69567b-ba5e-49b8-827a-2b6264ec33bd","Type":"ContainerStarted","Data":"4baefc5d5026f9eaaa3daaf4d6d1df1d289242056583e58e241c4d72efadf6ae"} Nov 28 15:47:37 crc kubenswrapper[4647]: I1128 15:47:37.730264 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:47:37 crc kubenswrapper[4647]: W1128 15:47:37.733044 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod271d0057_21bf_4899_9284_d8d2beb015b6.slice/crio-9578742e219dcdf7c006a80d73c7c8c41f265e8c173fdbccc513cae5236b6d71 WatchSource:0}: Error finding container 9578742e219dcdf7c006a80d73c7c8c41f265e8c173fdbccc513cae5236b6d71: Status 404 returned error can't find the container with id 9578742e219dcdf7c006a80d73c7c8c41f265e8c173fdbccc513cae5236b6d71 Nov 28 15:47:38 crc kubenswrapper[4647]: I1128 15:47:38.410756 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27fe6c77-c0f2-4398-a337-133eaca78fb4" path="/var/lib/kubelet/pods/27fe6c77-c0f2-4398-a337-133eaca78fb4/volumes" Nov 28 15:47:38 crc kubenswrapper[4647]: I1128 15:47:38.541562 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"271d0057-21bf-4899-9284-d8d2beb015b6","Type":"ContainerStarted","Data":"9578742e219dcdf7c006a80d73c7c8c41f265e8c173fdbccc513cae5236b6d71"} Nov 28 15:47:38 crc kubenswrapper[4647]: I1128 15:47:38.546654 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"50660b1a-39a9-4ada-a275-a068d6b406bf","Type":"ContainerStarted","Data":"1948edfd01b54c0cfb20efa3c22c73cd27397b49447289f806e70643bf4c2695"} Nov 28 15:47:38 crc kubenswrapper[4647]: I1128 15:47:38.549258 4647 generic.go:334] "Generic (PLEG): container finished" podID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerID="1b9458f3171c6ff5f2600e311f52aacbf51ab152e3667aff61afab0303a6d5c8" exitCode=0 Nov 28 15:47:38 crc kubenswrapper[4647]: I1128 15:47:38.549334 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" event={"ID":"ea69567b-ba5e-49b8-827a-2b6264ec33bd","Type":"ContainerDied","Data":"1b9458f3171c6ff5f2600e311f52aacbf51ab152e3667aff61afab0303a6d5c8"} Nov 28 15:47:39 crc 
kubenswrapper[4647]: I1128 15:47:39.565330 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" event={"ID":"ea69567b-ba5e-49b8-827a-2b6264ec33bd","Type":"ContainerStarted","Data":"06686196ca3ae262607cf92dbc12eeaa4376df20d3667df1ba1aee499bbce9c3"} Nov 28 15:47:39 crc kubenswrapper[4647]: I1128 15:47:39.603107 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" podStartSLOduration=4.603088142 podStartE2EDuration="4.603088142s" podCreationTimestamp="2025-11-28 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:47:39.593634791 +0000 UTC m=+1389.441241222" watchObservedRunningTime="2025-11-28 15:47:39.603088142 +0000 UTC m=+1389.450694573" Nov 28 15:47:40 crc kubenswrapper[4647]: I1128 15:47:40.583605 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"271d0057-21bf-4899-9284-d8d2beb015b6","Type":"ContainerStarted","Data":"4d329a7ce2f0465bf6c68cf10bfa5e61a096f84642665816df03aa7daf8506dd"} Nov 28 15:47:40 crc kubenswrapper[4647]: I1128 15:47:40.584638 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:46 crc kubenswrapper[4647]: I1128 15:47:46.703716 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:46 crc kubenswrapper[4647]: I1128 15:47:46.828310 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:47:46 crc kubenswrapper[4647]: I1128 15:47:46.837642 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="dnsmasq-dns" containerID="cri-o://d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704" gracePeriod=10 Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.021933 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-667c9c995c-qp6ls"] Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.023905 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.052942 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667c9c995c-qp6ls"] Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109015 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-svc\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109402 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-swift-storage-0\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109474 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-openstack-edpm-ipam\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109589 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-config\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109619 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnrbk\" (UniqueName: \"kubernetes.io/projected/c06de7d8-7b65-4a7b-876c-0049182a2ec0-kube-api-access-vnrbk\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109681 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-nb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.109803 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-sb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211194 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-svc\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211503 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-swift-storage-0\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211624 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-openstack-edpm-ipam\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211754 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-config\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211823 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnrbk\" (UniqueName: \"kubernetes.io/projected/c06de7d8-7b65-4a7b-876c-0049182a2ec0-kube-api-access-vnrbk\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.211893 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-nb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.212000 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-sb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.213837 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-swift-storage-0\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.214174 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-openstack-edpm-ipam\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.214486 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-sb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.215075 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-ovsdbserver-nb\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.215658 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-config\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.217452 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c06de7d8-7b65-4a7b-876c-0049182a2ec0-dns-svc\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.244199 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnrbk\" (UniqueName: \"kubernetes.io/projected/c06de7d8-7b65-4a7b-876c-0049182a2ec0-kube-api-access-vnrbk\") pod \"dnsmasq-dns-667c9c995c-qp6ls\" (UID: \"c06de7d8-7b65-4a7b-876c-0049182a2ec0\") " pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.348031 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.351803 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.415652 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.416166 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.416624 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.416672 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.416719 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75qpl\" (UniqueName: \"kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: 
\"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.416805 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config\") pod \"3170fd20-182e-4cc7-b636-d6173b6c7f08\" (UID: \"3170fd20-182e-4cc7-b636-d6173b6c7f08\") " Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.435968 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl" (OuterVolumeSpecName: "kube-api-access-75qpl") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "kube-api-access-75qpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.473848 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.486307 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.494988 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.519657 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.519684 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.519693 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.519705 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75qpl\" (UniqueName: \"kubernetes.io/projected/3170fd20-182e-4cc7-b636-d6173b6c7f08-kube-api-access-75qpl\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.528278 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.541830 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config" (OuterVolumeSpecName: "config") pod "3170fd20-182e-4cc7-b636-d6173b6c7f08" (UID: "3170fd20-182e-4cc7-b636-d6173b6c7f08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.621642 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.621691 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3170fd20-182e-4cc7-b636-d6173b6c7f08-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.668798 4647 generic.go:334] "Generic (PLEG): container finished" podID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerID="d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704" exitCode=0 Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.668842 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" event={"ID":"3170fd20-182e-4cc7-b636-d6173b6c7f08","Type":"ContainerDied","Data":"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704"} Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.668867 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.668874 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-vgkth" event={"ID":"3170fd20-182e-4cc7-b636-d6173b6c7f08","Type":"ContainerDied","Data":"1811300758efed455c221aa2827e63d8677bb8c9b11551ff30e668df70bdf458"} Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.668910 4647 scope.go:117] "RemoveContainer" containerID="d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.696310 4647 scope.go:117] "RemoveContainer" containerID="19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.705033 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.714218 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-vgkth"] Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.719604 4647 scope.go:117] "RemoveContainer" containerID="d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704" Nov 28 15:47:47 crc kubenswrapper[4647]: E1128 15:47:47.720100 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704\": container with ID starting with d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704 not found: ID does not exist" containerID="d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.720136 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704"} err="failed to get container status \"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704\": rpc error: code = NotFound desc = could not find container \"d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704\": container with ID starting with d4b2749ad6630327d5ee1fda0cd1d42b3f842d3357c64ba0802392a553188704 not found: ID does not exist" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.720159 4647 scope.go:117] "RemoveContainer" containerID="19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5" Nov 28 15:47:47 crc kubenswrapper[4647]: E1128 15:47:47.720405 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5\": container with ID starting with 19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5 not found: ID does not exist" containerID="19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5" Nov 28 15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.720485 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5"} err="failed to get container status \"19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5\": rpc error: code = NotFound desc = could not find container \"19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5\": container with ID starting with 19a5f64ae97e549e584a9738a467c10aea63a4282514616e152d9cf77b820de5 not found: ID does not exist" Nov 28 
15:47:47 crc kubenswrapper[4647]: I1128 15:47:47.910521 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667c9c995c-qp6ls"] Nov 28 15:47:48 crc kubenswrapper[4647]: I1128 15:47:48.414088 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" path="/var/lib/kubelet/pods/3170fd20-182e-4cc7-b636-d6173b6c7f08/volumes" Nov 28 15:47:48 crc kubenswrapper[4647]: I1128 15:47:48.694738 4647 generic.go:334] "Generic (PLEG): container finished" podID="c06de7d8-7b65-4a7b-876c-0049182a2ec0" containerID="a0c0093c7975726a846a8028fd5689c5fbf4aebf7ffdb0e10fa4e3333a4b0a2d" exitCode=0 Nov 28 15:47:48 crc kubenswrapper[4647]: I1128 15:47:48.694797 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" event={"ID":"c06de7d8-7b65-4a7b-876c-0049182a2ec0","Type":"ContainerDied","Data":"a0c0093c7975726a846a8028fd5689c5fbf4aebf7ffdb0e10fa4e3333a4b0a2d"} Nov 28 15:47:48 crc kubenswrapper[4647]: I1128 15:47:48.695150 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" event={"ID":"c06de7d8-7b65-4a7b-876c-0049182a2ec0","Type":"ContainerStarted","Data":"37e5e1f63e4d546cc995ee815bef410c70df54a6069c139de03e850e9c6b9fc5"} Nov 28 15:47:49 crc kubenswrapper[4647]: I1128 15:47:49.711470 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" event={"ID":"c06de7d8-7b65-4a7b-876c-0049182a2ec0","Type":"ContainerStarted","Data":"af0419259d8b426e59bdaa0f8fc4214f6cca30eadc8b3fb996ee1cd3cf39087b"} Nov 28 15:47:49 crc kubenswrapper[4647]: I1128 15:47:49.712629 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:49 crc kubenswrapper[4647]: I1128 15:47:49.743121 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" podStartSLOduration=3.743098963 podStartE2EDuration="3.743098963s" podCreationTimestamp="2025-11-28 15:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:47:49.741229543 +0000 UTC m=+1399.588836024" watchObservedRunningTime="2025-11-28 15:47:49.743098963 +0000 UTC m=+1399.590705394" Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.350705 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-667c9c995c-qp6ls" Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.462478 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.462779 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="dnsmasq-dns" containerID="cri-o://06686196ca3ae262607cf92dbc12eeaa4376df20d3667df1ba1aee499bbce9c3" gracePeriod=10 Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.814455 4647 generic.go:334] "Generic (PLEG): container finished" podID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerID="06686196ca3ae262607cf92dbc12eeaa4376df20d3667df1ba1aee499bbce9c3" exitCode=0 Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.814506 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" 
event={"ID":"ea69567b-ba5e-49b8-827a-2b6264ec33bd","Type":"ContainerDied","Data":"06686196ca3ae262607cf92dbc12eeaa4376df20d3667df1ba1aee499bbce9c3"} Nov 28 15:47:57 crc kubenswrapper[4647]: I1128 15:47:57.942976 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089512 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089587 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089711 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089744 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089838 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qx56\" (UniqueName: \"kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.089929 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.090025 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0\") pod \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\" (UID: \"ea69567b-ba5e-49b8-827a-2b6264ec33bd\") " Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.116276 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56" (OuterVolumeSpecName: "kube-api-access-2qx56") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "kube-api-access-2qx56". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.158703 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.162827 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config" (OuterVolumeSpecName: "config") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.169359 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.184866 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.186026 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.194326 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.194776 4647 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.194882 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.194964 4647 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.195037 4647 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.195102 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qx56\" (UniqueName: \"kubernetes.io/projected/ea69567b-ba5e-49b8-827a-2b6264ec33bd-kube-api-access-2qx56\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.210804 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea69567b-ba5e-49b8-827a-2b6264ec33bd" (UID: "ea69567b-ba5e-49b8-827a-2b6264ec33bd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.296848 4647 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea69567b-ba5e-49b8-827a-2b6264ec33bd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.830830 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" event={"ID":"ea69567b-ba5e-49b8-827a-2b6264ec33bd","Type":"ContainerDied","Data":"4baefc5d5026f9eaaa3daaf4d6d1df1d289242056583e58e241c4d72efadf6ae"} Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.831136 4647 scope.go:117] "RemoveContainer" containerID="06686196ca3ae262607cf92dbc12eeaa4376df20d3667df1ba1aee499bbce9c3" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.831266 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kwxhz" Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.864020 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.875910 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kwxhz"] Nov 28 15:47:58 crc kubenswrapper[4647]: I1128 15:47:58.877747 4647 scope.go:117] "RemoveContainer" containerID="1b9458f3171c6ff5f2600e311f52aacbf51ab152e3667aff61afab0303a6d5c8" Nov 28 15:48:00 crc kubenswrapper[4647]: I1128 15:48:00.417761 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" path="/var/lib/kubelet/pods/ea69567b-ba5e-49b8-827a-2b6264ec33bd/volumes" Nov 28 15:48:10 crc kubenswrapper[4647]: I1128 15:48:10.955270 4647 generic.go:334] "Generic (PLEG): container finished" podID="50660b1a-39a9-4ada-a275-a068d6b406bf" containerID="1948edfd01b54c0cfb20efa3c22c73cd27397b49447289f806e70643bf4c2695" exitCode=0 Nov 28 15:48:10 crc kubenswrapper[4647]: I1128 15:48:10.955362 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"50660b1a-39a9-4ada-a275-a068d6b406bf","Type":"ContainerDied","Data":"1948edfd01b54c0cfb20efa3c22c73cd27397b49447289f806e70643bf4c2695"} Nov 28 15:48:11 crc kubenswrapper[4647]: I1128 15:48:11.968695 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"50660b1a-39a9-4ada-a275-a068d6b406bf","Type":"ContainerStarted","Data":"8e7fa92188c3c1f32804a0a6ddc91c5dcb6afb7126f9a20212f9a2617a0975e1"} Nov 28 15:48:11 crc kubenswrapper[4647]: I1128 15:48:11.969288 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 15:48:12 crc kubenswrapper[4647]: I1128 15:48:12.009384 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.009364821 podStartE2EDuration="37.009364821s" podCreationTimestamp="2025-11-28 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:48:11.996557051 +0000 UTC m=+1421.844163492" watchObservedRunningTime="2025-11-28 15:48:12.009364821 +0000 UTC m=+1421.856971252" Nov 28 15:48:13 crc kubenswrapper[4647]: I1128 15:48:13.987707 4647 generic.go:334] "Generic (PLEG): container finished" podID="271d0057-21bf-4899-9284-d8d2beb015b6" containerID="4d329a7ce2f0465bf6c68cf10bfa5e61a096f84642665816df03aa7daf8506dd" exitCode=0 Nov 28 15:48:13 crc kubenswrapper[4647]: I1128 15:48:13.987775 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"271d0057-21bf-4899-9284-d8d2beb015b6","Type":"ContainerDied","Data":"4d329a7ce2f0465bf6c68cf10bfa5e61a096f84642665816df03aa7daf8506dd"} Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.756201 4647 scope.go:117] "RemoveContainer" containerID="1d765177d4ab6b1adb6f24dda7da2c260fbfa6a0ff35f2e53ad08e72328e8cf0" Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.787842 4647 scope.go:117] "RemoveContainer" containerID="354bf77e9451bcb5641bdd9c7a7ea1368c5584e2d9219698e23563b126098da9" Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.828324 4647 scope.go:117] "RemoveContainer" containerID="badb58cba2b5c07c01a67494cf365df196ed0be67916469e9b303908a2f267d5" Nov 28 15:48:14 crc 
kubenswrapper[4647]: I1128 15:48:14.875348 4647 scope.go:117] "RemoveContainer" containerID="ad0e0b2e3a3c83519f8a6aa7dcae530de651e0115c582b271c06fdb7f4eb07e2" Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.898890 4647 scope.go:117] "RemoveContainer" containerID="61467da5271e1a0fdd32fbdb0f6df331cf73f80464bce7002140f82a8de6c7e8" Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.930721 4647 scope.go:117] "RemoveContainer" containerID="bda0e7a2501d0632e4126d44012a72ccc6210bdc823dd29bf4dbd6deaaa1b302" Nov 28 15:48:14 crc kubenswrapper[4647]: I1128 15:48:14.960945 4647 scope.go:117] "RemoveContainer" containerID="2e75bde85ba51c8394b8568315f7a75ca517e7cb45987fe3d3a8fdd5fecd4f31" Nov 28 15:48:15 crc kubenswrapper[4647]: I1128 15:48:15.006746 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"271d0057-21bf-4899-9284-d8d2beb015b6","Type":"ContainerStarted","Data":"1d0945997fb57e35d164febcd2f19b3946270f56bb01c4fb45c02540247fe35c"} Nov 28 15:48:15 crc kubenswrapper[4647]: I1128 15:48:15.007012 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:48:15 crc kubenswrapper[4647]: I1128 15:48:15.048571 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.04853094 podStartE2EDuration="39.04853094s" podCreationTimestamp="2025-11-28 15:47:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:48:15.0338081 +0000 UTC m=+1424.881414521" watchObservedRunningTime="2025-11-28 15:48:15.04853094 +0000 UTC m=+1424.896137361" Nov 28 15:48:17 crc kubenswrapper[4647]: I1128 15:48:17.022793 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:48:17 crc kubenswrapper[4647]: I1128 15:48:17.023123 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.351463 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp"] Nov 28 15:48:21 crc kubenswrapper[4647]: E1128 15:48:21.352668 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.352684 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: E1128 15:48:21.352724 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.352732 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: E1128 15:48:21.352748 4647 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="init" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.352756 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="init" Nov 28 15:48:21 crc kubenswrapper[4647]: E1128 15:48:21.352770 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="init" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.352776 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="init" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.353001 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3170fd20-182e-4cc7-b636-d6173b6c7f08" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.353019 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea69567b-ba5e-49b8-827a-2b6264ec33bd" containerName="dnsmasq-dns" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.353777 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.357300 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.357555 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.357737 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.357743 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.371307 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp"] Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.519944 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.520364 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.520473 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd78w\" (UniqueName: \"kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.520578 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.622663 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.622974 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd78w\" (UniqueName: \"kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.623438 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.623998 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.629934 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.632223 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.636454 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 
28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.667448 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd78w\" (UniqueName: \"kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:21 crc kubenswrapper[4647]: I1128 15:48:21.674797 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:22 crc kubenswrapper[4647]: I1128 15:48:22.562701 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp"] Nov 28 15:48:23 crc kubenswrapper[4647]: I1128 15:48:23.082087 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" event={"ID":"ee739c78-f4a0-46eb-a0ca-a7bcab007c16","Type":"ContainerStarted","Data":"2a8b7fa7294ef493c35d482aae6007435cb1333c970b07d3a9be1cb87c76a97a"} Nov 28 15:48:25 crc kubenswrapper[4647]: I1128 15:48:25.742618 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 15:48:27 crc kubenswrapper[4647]: I1128 15:48:27.256619 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:48:35 crc kubenswrapper[4647]: I1128 15:48:35.067171 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:48:36 crc kubenswrapper[4647]: I1128 15:48:36.241084 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" event={"ID":"ee739c78-f4a0-46eb-a0ca-a7bcab007c16","Type":"ContainerStarted","Data":"c401c2f78fb5401691da39c818165c85413b73ad9c3c6ec39f88ed1a210681c6"} Nov 28 15:48:36 crc kubenswrapper[4647]: I1128 15:48:36.275327 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" podStartSLOduration=2.787500953 podStartE2EDuration="15.27530206s" podCreationTimestamp="2025-11-28 15:48:21 +0000 UTC" firstStartedPulling="2025-11-28 15:48:22.575598174 +0000 UTC m=+1432.423204595" lastFinishedPulling="2025-11-28 15:48:35.063399281 +0000 UTC m=+1444.911005702" observedRunningTime="2025-11-28 15:48:36.266600309 +0000 UTC m=+1446.114206740" watchObservedRunningTime="2025-11-28 15:48:36.27530206 +0000 UTC m=+1446.122908481" Nov 28 15:48:47 crc kubenswrapper[4647]: I1128 15:48:47.023114 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:48:47 crc kubenswrapper[4647]: I1128 15:48:47.023811 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:48:48 crc kubenswrapper[4647]: I1128 15:48:48.365595 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="ee739c78-f4a0-46eb-a0ca-a7bcab007c16" containerID="c401c2f78fb5401691da39c818165c85413b73ad9c3c6ec39f88ed1a210681c6" exitCode=0 Nov 28 15:48:48 crc kubenswrapper[4647]: I1128 15:48:48.365675 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" event={"ID":"ee739c78-f4a0-46eb-a0ca-a7bcab007c16","Type":"ContainerDied","Data":"c401c2f78fb5401691da39c818165c85413b73ad9c3c6ec39f88ed1a210681c6"} Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.850729 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.908619 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd78w\" (UniqueName: \"kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w\") pod \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.909295 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle\") pod \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.909475 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory\") pod \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.909544 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key\") pod \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\" (UID: \"ee739c78-f4a0-46eb-a0ca-a7bcab007c16\") " Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.916961 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w" (OuterVolumeSpecName: "kube-api-access-sd78w") pod "ee739c78-f4a0-46eb-a0ca-a7bcab007c16" (UID: "ee739c78-f4a0-46eb-a0ca-a7bcab007c16"). InnerVolumeSpecName "kube-api-access-sd78w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.924210 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "ee739c78-f4a0-46eb-a0ca-a7bcab007c16" (UID: "ee739c78-f4a0-46eb-a0ca-a7bcab007c16"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.940312 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory" (OuterVolumeSpecName: "inventory") pod "ee739c78-f4a0-46eb-a0ca-a7bcab007c16" (UID: "ee739c78-f4a0-46eb-a0ca-a7bcab007c16"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:48:49 crc kubenswrapper[4647]: I1128 15:48:49.952632 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ee739c78-f4a0-46eb-a0ca-a7bcab007c16" (UID: "ee739c78-f4a0-46eb-a0ca-a7bcab007c16"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.012804 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.012855 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd78w\" (UniqueName: \"kubernetes.io/projected/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-kube-api-access-sd78w\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.012892 4647 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.012917 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee739c78-f4a0-46eb-a0ca-a7bcab007c16-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.394717 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.415250 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp" event={"ID":"ee739c78-f4a0-46eb-a0ca-a7bcab007c16","Type":"ContainerDied","Data":"2a8b7fa7294ef493c35d482aae6007435cb1333c970b07d3a9be1cb87c76a97a"} Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.415308 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a8b7fa7294ef493c35d482aae6007435cb1333c970b07d3a9be1cb87c76a97a" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.532850 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9"] Nov 28 15:48:50 crc kubenswrapper[4647]: E1128 15:48:50.533386 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee739c78-f4a0-46eb-a0ca-a7bcab007c16" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.533429 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee739c78-f4a0-46eb-a0ca-a7bcab007c16" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.533703 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee739c78-f4a0-46eb-a0ca-a7bcab007c16" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.534621 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.537757 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.539440 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.540510 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.545258 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.549387 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9"] Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.627999 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.628106 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.628198 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6nn5\" (UniqueName: \"kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.730284 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6nn5\" (UniqueName: \"kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.730388 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.730484 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.737881 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.737904 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.752694 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6nn5\" (UniqueName: \"kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-hjmk9\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:50 crc kubenswrapper[4647]: I1128 15:48:50.856832 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:51 crc kubenswrapper[4647]: I1128 15:48:51.435626 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9"] Nov 28 15:48:52 crc kubenswrapper[4647]: I1128 15:48:52.414193 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" event={"ID":"54d11771-921e-4086-a17f-c853026c4a3e","Type":"ContainerStarted","Data":"e623eaceaebd5666be4730e7c3b894abc2c229080887247ff264a1b5f78ce300"} Nov 28 15:48:52 crc kubenswrapper[4647]: I1128 15:48:52.416484 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" event={"ID":"54d11771-921e-4086-a17f-c853026c4a3e","Type":"ContainerStarted","Data":"43589c82c9ef12ac9b97bc8079b282265bc067f0df6204068bee6b85f9a3e54a"} Nov 28 15:48:52 crc kubenswrapper[4647]: I1128 15:48:52.440111 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" podStartSLOduration=1.832715938 podStartE2EDuration="2.440092611s" podCreationTimestamp="2025-11-28 15:48:50 +0000 UTC" firstStartedPulling="2025-11-28 15:48:51.447352382 +0000 UTC m=+1461.294958813" lastFinishedPulling="2025-11-28 15:48:52.054729025 +0000 UTC m=+1461.902335486" observedRunningTime="2025-11-28 15:48:52.432432707 +0000 UTC m=+1462.280039138" watchObservedRunningTime="2025-11-28 15:48:52.440092611 +0000 UTC m=+1462.287699032" Nov 28 15:48:56 crc kubenswrapper[4647]: I1128 15:48:56.466069 4647 generic.go:334] "Generic (PLEG): container finished" podID="54d11771-921e-4086-a17f-c853026c4a3e" containerID="e623eaceaebd5666be4730e7c3b894abc2c229080887247ff264a1b5f78ce300" exitCode=0 Nov 28 15:48:56 crc kubenswrapper[4647]: I1128 15:48:56.466227 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" 
event={"ID":"54d11771-921e-4086-a17f-c853026c4a3e","Type":"ContainerDied","Data":"e623eaceaebd5666be4730e7c3b894abc2c229080887247ff264a1b5f78ce300"} Nov 28 15:48:57 crc kubenswrapper[4647]: I1128 15:48:57.897315 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.008724 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6nn5\" (UniqueName: \"kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5\") pod \"54d11771-921e-4086-a17f-c853026c4a3e\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.008881 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key\") pod \"54d11771-921e-4086-a17f-c853026c4a3e\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.008997 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory\") pod \"54d11771-921e-4086-a17f-c853026c4a3e\" (UID: \"54d11771-921e-4086-a17f-c853026c4a3e\") " Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.022661 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5" (OuterVolumeSpecName: "kube-api-access-m6nn5") pod "54d11771-921e-4086-a17f-c853026c4a3e" (UID: "54d11771-921e-4086-a17f-c853026c4a3e"). InnerVolumeSpecName "kube-api-access-m6nn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.038231 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "54d11771-921e-4086-a17f-c853026c4a3e" (UID: "54d11771-921e-4086-a17f-c853026c4a3e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.045948 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory" (OuterVolumeSpecName: "inventory") pod "54d11771-921e-4086-a17f-c853026c4a3e" (UID: "54d11771-921e-4086-a17f-c853026c4a3e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.112244 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6nn5\" (UniqueName: \"kubernetes.io/projected/54d11771-921e-4086-a17f-c853026c4a3e-kube-api-access-m6nn5\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.112299 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.112309 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54d11771-921e-4086-a17f-c853026c4a3e-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.498095 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" event={"ID":"54d11771-921e-4086-a17f-c853026c4a3e","Type":"ContainerDied","Data":"43589c82c9ef12ac9b97bc8079b282265bc067f0df6204068bee6b85f9a3e54a"} Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.498644 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-hjmk9" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.498661 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43589c82c9ef12ac9b97bc8079b282265bc067f0df6204068bee6b85f9a3e54a" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.602275 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s"] Nov 28 15:48:58 crc kubenswrapper[4647]: E1128 15:48:58.602923 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d11771-921e-4086-a17f-c853026c4a3e" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.602947 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="54d11771-921e-4086-a17f-c853026c4a3e" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.603194 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="54d11771-921e-4086-a17f-c853026c4a3e" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.604266 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.609813 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.610136 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.610362 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.610628 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.612210 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s"] Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.726107 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86r77\" (UniqueName: \"kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.726175 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.726220 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.726597 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.828442 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86r77\" (UniqueName: \"kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.828505 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.828541 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.828595 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.840074 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.842407 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.850207 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.854687 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86r77\" (UniqueName: \"kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:58 crc kubenswrapper[4647]: I1128 15:48:58.937455 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" Nov 28 15:48:59 crc kubenswrapper[4647]: I1128 15:48:59.591477 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s"] Nov 28 15:49:00 crc kubenswrapper[4647]: I1128 15:49:00.520671 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" event={"ID":"60d6a4cd-44ae-46ff-a980-a81ddab3b98c","Type":"ContainerStarted","Data":"bbd1bde571e2d3a5e3ef87c8efc885fde037beafe8dd9b836ecfac3b1524e64f"} Nov 28 15:49:01 crc kubenswrapper[4647]: I1128 15:49:01.532980 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" event={"ID":"60d6a4cd-44ae-46ff-a980-a81ddab3b98c","Type":"ContainerStarted","Data":"0b3b5e46ff8ee4d783f5e147062d2c49ece474e4c58ed8e0f5d2936831ecf10f"} Nov 28 15:49:15 crc kubenswrapper[4647]: I1128 15:49:15.179085 4647 scope.go:117] "RemoveContainer" containerID="85fa52db2ce9a76d2340a5988cec119e28bd70c88e3919e7ab94006773523f4b" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.023430 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.024089 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.024155 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.025110 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.025174 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" gracePeriod=600 Nov 28 15:49:17 crc kubenswrapper[4647]: E1128 15:49:17.178828 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.735466 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" exitCode=0 Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.735558 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"} Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.735659 4647 scope.go:117] "RemoveContainer" containerID="06d277d727639fef75113ec72cf0adfccb4fffa2c30a4bb0f3631c657cbb984b" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.737130 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:49:17 crc kubenswrapper[4647]: E1128 15:49:17.737883 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:49:17 crc kubenswrapper[4647]: I1128 15:49:17.791536 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" podStartSLOduration=19.100885793 podStartE2EDuration="19.791509946s" podCreationTimestamp="2025-11-28 15:48:58 +0000 UTC" firstStartedPulling="2025-11-28 15:48:59.598453794 +0000 UTC m=+1469.446060215" lastFinishedPulling="2025-11-28 15:49:00.289077947 +0000 UTC m=+1470.136684368" observedRunningTime="2025-11-28 15:49:01.561267228 +0000 UTC m=+1471.408873659" watchObservedRunningTime="2025-11-28 15:49:17.791509946 +0000 UTC m=+1487.639116407" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.426075 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.429450 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.451841 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.610341 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.610763 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlkgp\" (UniqueName: \"kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.610961 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.714670 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.714801 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlkgp\" (UniqueName: \"kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.714879 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.715500 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.716537 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.740526 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hlkgp\" (UniqueName: \"kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp\") pod \"certified-operators-rb2br\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:24 crc kubenswrapper[4647]: I1128 15:49:24.753478 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:25 crc kubenswrapper[4647]: I1128 15:49:25.074693 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:25 crc kubenswrapper[4647]: I1128 15:49:25.834654 4647 generic.go:334] "Generic (PLEG): container finished" podID="0b34b228-36c2-4c49-8573-786030a12fa2" containerID="9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94" exitCode=0 Nov 28 15:49:25 crc kubenswrapper[4647]: I1128 15:49:25.834782 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerDied","Data":"9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94"} Nov 28 15:49:25 crc kubenswrapper[4647]: I1128 15:49:25.835041 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerStarted","Data":"ea4ce78e8d229b05146fc20585ac063541a9b025474bd9f521b2c29a5cad0937"} Nov 28 15:49:26 crc kubenswrapper[4647]: I1128 15:49:26.847976 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerStarted","Data":"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6"} Nov 28 15:49:28 crc kubenswrapper[4647]: I1128 15:49:28.882236 4647 generic.go:334] "Generic (PLEG): container finished" podID="0b34b228-36c2-4c49-8573-786030a12fa2" containerID="1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6" exitCode=0 Nov 28 15:49:28 crc kubenswrapper[4647]: I1128 15:49:28.882331 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerDied","Data":"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6"} Nov 28 15:49:29 crc kubenswrapper[4647]: I1128 15:49:29.898760 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerStarted","Data":"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068"} Nov 28 15:49:29 crc kubenswrapper[4647]: I1128 15:49:29.921022 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rb2br" podStartSLOduration=2.300265326 podStartE2EDuration="5.920994216s" podCreationTimestamp="2025-11-28 15:49:24 +0000 UTC" firstStartedPulling="2025-11-28 15:49:25.837794282 +0000 UTC m=+1495.685400743" lastFinishedPulling="2025-11-28 15:49:29.458523172 +0000 UTC m=+1499.306129633" observedRunningTime="2025-11-28 15:49:29.914487653 +0000 UTC m=+1499.762094074" watchObservedRunningTime="2025-11-28 15:49:29.920994216 +0000 UTC m=+1499.768600677" Nov 28 15:49:30 crc kubenswrapper[4647]: I1128 15:49:30.443127 4647 scope.go:117] "RemoveContainer" 
containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:49:30 crc kubenswrapper[4647]: E1128 15:49:30.446653 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:49:34 crc kubenswrapper[4647]: I1128 15:49:34.753837 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:34 crc kubenswrapper[4647]: I1128 15:49:34.754580 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:34 crc kubenswrapper[4647]: I1128 15:49:34.840374 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:35 crc kubenswrapper[4647]: I1128 15:49:35.017525 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:35 crc kubenswrapper[4647]: I1128 15:49:35.095990 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:36 crc kubenswrapper[4647]: I1128 15:49:36.974754 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rb2br" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="registry-server" containerID="cri-o://4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068" gracePeriod=2 Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.440512 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.507376 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:37 crc kubenswrapper[4647]: E1128 15:49:37.508820 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="extract-utilities" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.508837 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="extract-utilities" Nov 28 15:49:37 crc kubenswrapper[4647]: E1128 15:49:37.508845 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="extract-content" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.508851 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="extract-content" Nov 28 15:49:37 crc kubenswrapper[4647]: E1128 15:49:37.508860 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="registry-server" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.508866 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="registry-server" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.509312 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" containerName="registry-server" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.513734 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content\") pod \"0b34b228-36c2-4c49-8573-786030a12fa2\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.513806 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlkgp\" (UniqueName: \"kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp\") pod \"0b34b228-36c2-4c49-8573-786030a12fa2\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.513885 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities\") pod \"0b34b228-36c2-4c49-8573-786030a12fa2\" (UID: \"0b34b228-36c2-4c49-8573-786030a12fa2\") " Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.515839 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities" (OuterVolumeSpecName: "utilities") pod "0b34b228-36c2-4c49-8573-786030a12fa2" (UID: "0b34b228-36c2-4c49-8573-786030a12fa2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.516242 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.532977 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.573157 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp" (OuterVolumeSpecName: "kube-api-access-hlkgp") pod "0b34b228-36c2-4c49-8573-786030a12fa2" (UID: "0b34b228-36c2-4c49-8573-786030a12fa2"). InnerVolumeSpecName "kube-api-access-hlkgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.616909 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.616963 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.617056 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7jhq\" (UniqueName: \"kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.617130 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.617150 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlkgp\" (UniqueName: \"kubernetes.io/projected/0b34b228-36c2-4c49-8573-786030a12fa2-kube-api-access-hlkgp\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.621776 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b34b228-36c2-4c49-8573-786030a12fa2" (UID: "0b34b228-36c2-4c49-8573-786030a12fa2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.718560 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.719142 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.719322 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7jhq\" (UniqueName: \"kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.719482 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b34b228-36c2-4c49-8573-786030a12fa2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.719925 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.719955 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.741737 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7jhq\" (UniqueName: \"kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq\") pod \"community-operators-slx54\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:37 crc kubenswrapper[4647]: I1128 15:49:37.851446 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.075031 4647 generic.go:334] "Generic (PLEG): container finished" podID="0b34b228-36c2-4c49-8573-786030a12fa2" containerID="4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068" exitCode=0 Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.075077 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerDied","Data":"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068"} Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.075131 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb2br" event={"ID":"0b34b228-36c2-4c49-8573-786030a12fa2","Type":"ContainerDied","Data":"ea4ce78e8d229b05146fc20585ac063541a9b025474bd9f521b2c29a5cad0937"} Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.075160 4647 scope.go:117] "RemoveContainer" containerID="4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.075299 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rb2br" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.155230 4647 scope.go:117] "RemoveContainer" containerID="1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.171154 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.196539 4647 scope.go:117] "RemoveContainer" containerID="9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.207014 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rb2br"] Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.225013 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.287482 4647 scope.go:117] "RemoveContainer" containerID="4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068" Nov 28 15:49:38 crc kubenswrapper[4647]: E1128 15:49:38.288030 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068\": container with ID starting with 4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068 not found: ID does not exist" containerID="4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.288086 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068"} err="failed to get container status \"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068\": rpc error: code = NotFound desc = could not find container \"4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068\": container with ID starting with 4abbec4fbddf751d942bbae1af81f49f0b2949662d0fbc5b87e1aefedf99e068 not found: ID does not exist" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.288106 4647 scope.go:117] 
"RemoveContainer" containerID="1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6" Nov 28 15:49:38 crc kubenswrapper[4647]: E1128 15:49:38.288579 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6\": container with ID starting with 1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6 not found: ID does not exist" containerID="1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.288642 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6"} err="failed to get container status \"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6\": rpc error: code = NotFound desc = could not find container \"1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6\": container with ID starting with 1f19a39b459803a15c0e675a9743556a2dfe084786e95f037f4546b0f3faafb6 not found: ID does not exist" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.288678 4647 scope.go:117] "RemoveContainer" containerID="9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94" Nov 28 15:49:38 crc kubenswrapper[4647]: E1128 15:49:38.289215 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94\": container with ID starting with 9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94 not found: ID does not exist" containerID="9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.289237 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94"} err="failed to get container status \"9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94\": rpc error: code = NotFound desc = could not find container \"9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94\": container with ID starting with 9adfdce9260572e75eb967776bc242ef8a1f47b5bdabb78e78700b58c6510d94 not found: ID does not exist" Nov 28 15:49:38 crc kubenswrapper[4647]: I1128 15:49:38.405136 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b34b228-36c2-4c49-8573-786030a12fa2" path="/var/lib/kubelet/pods/0b34b228-36c2-4c49-8573-786030a12fa2/volumes" Nov 28 15:49:39 crc kubenswrapper[4647]: I1128 15:49:39.095831 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerID="f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193" exitCode=0 Nov 28 15:49:39 crc kubenswrapper[4647]: I1128 15:49:39.096317 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerDied","Data":"f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193"} Nov 28 15:49:39 crc kubenswrapper[4647]: I1128 15:49:39.096369 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerStarted","Data":"67b9a28bbba358906350b331203211e6baa58c607e64746464d50763b67a3bd5"} Nov 28 
15:49:41 crc kubenswrapper[4647]: I1128 15:49:41.119302 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerID="91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac" exitCode=0 Nov 28 15:49:41 crc kubenswrapper[4647]: I1128 15:49:41.119406 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerDied","Data":"91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac"} Nov 28 15:49:43 crc kubenswrapper[4647]: I1128 15:49:43.147720 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerStarted","Data":"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253"} Nov 28 15:49:43 crc kubenswrapper[4647]: I1128 15:49:43.185774 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-slx54" podStartSLOduration=3.521489285 podStartE2EDuration="6.18575052s" podCreationTimestamp="2025-11-28 15:49:37 +0000 UTC" firstStartedPulling="2025-11-28 15:49:39.103893692 +0000 UTC m=+1508.951500133" lastFinishedPulling="2025-11-28 15:49:41.768154907 +0000 UTC m=+1511.615761368" observedRunningTime="2025-11-28 15:49:43.172256322 +0000 UTC m=+1513.019862783" watchObservedRunningTime="2025-11-28 15:49:43.18575052 +0000 UTC m=+1513.033356941" Nov 28 15:49:43 crc kubenswrapper[4647]: I1128 15:49:43.394310 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:49:43 crc kubenswrapper[4647]: E1128 15:49:43.394617 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:49:47 crc kubenswrapper[4647]: I1128 15:49:47.852558 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:47 crc kubenswrapper[4647]: I1128 15:49:47.853500 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:47 crc kubenswrapper[4647]: I1128 15:49:47.931862 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:48 crc kubenswrapper[4647]: I1128 15:49:48.256974 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:48 crc kubenswrapper[4647]: I1128 15:49:48.312379 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:50 crc kubenswrapper[4647]: I1128 15:49:50.259259 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-slx54" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="registry-server" containerID="cri-o://175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253" gracePeriod=2 Nov 28 15:49:50 crc kubenswrapper[4647]: I1128 
15:49:50.811567 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.010893 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7jhq\" (UniqueName: \"kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq\") pod \"6b578681-44de-4616-99eb-0fc0e8ba3c73\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.010978 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content\") pod \"6b578681-44de-4616-99eb-0fc0e8ba3c73\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.011001 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities\") pod \"6b578681-44de-4616-99eb-0fc0e8ba3c73\" (UID: \"6b578681-44de-4616-99eb-0fc0e8ba3c73\") " Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.012502 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities" (OuterVolumeSpecName: "utilities") pod "6b578681-44de-4616-99eb-0fc0e8ba3c73" (UID: "6b578681-44de-4616-99eb-0fc0e8ba3c73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.026770 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq" (OuterVolumeSpecName: "kube-api-access-z7jhq") pod "6b578681-44de-4616-99eb-0fc0e8ba3c73" (UID: "6b578681-44de-4616-99eb-0fc0e8ba3c73"). InnerVolumeSpecName "kube-api-access-z7jhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.073986 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b578681-44de-4616-99eb-0fc0e8ba3c73" (UID: "6b578681-44de-4616-99eb-0fc0e8ba3c73"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.113564 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7jhq\" (UniqueName: \"kubernetes.io/projected/6b578681-44de-4616-99eb-0fc0e8ba3c73-kube-api-access-z7jhq\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.113598 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.114844 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b578681-44de-4616-99eb-0fc0e8ba3c73-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.275849 4647 generic.go:334] "Generic (PLEG): container finished" podID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerID="175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253" exitCode=0 Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.275916 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-slx54" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.275933 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerDied","Data":"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253"} Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.276528 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-slx54" event={"ID":"6b578681-44de-4616-99eb-0fc0e8ba3c73","Type":"ContainerDied","Data":"67b9a28bbba358906350b331203211e6baa58c607e64746464d50763b67a3bd5"} Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.276564 4647 scope.go:117] "RemoveContainer" containerID="175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.331198 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.341097 4647 scope.go:117] "RemoveContainer" containerID="91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.341395 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-slx54"] Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.364938 4647 scope.go:117] "RemoveContainer" containerID="f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.429454 4647 scope.go:117] "RemoveContainer" containerID="175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253" Nov 28 15:49:51 crc kubenswrapper[4647]: E1128 15:49:51.429999 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253\": container with ID starting with 175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253 not found: ID does not exist" containerID="175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.430050 
4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253"} err="failed to get container status \"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253\": rpc error: code = NotFound desc = could not find container \"175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253\": container with ID starting with 175cc353bc5734c08319527d41d5f9e5f68aaa735418aa21871ee2d462d59253 not found: ID does not exist" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.430083 4647 scope.go:117] "RemoveContainer" containerID="91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac" Nov 28 15:49:51 crc kubenswrapper[4647]: E1128 15:49:51.430426 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac\": container with ID starting with 91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac not found: ID does not exist" containerID="91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.430473 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac"} err="failed to get container status \"91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac\": rpc error: code = NotFound desc = could not find container \"91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac\": container with ID starting with 91d0cceaf4aaa4d2737e8e6e15485491e80d1119da062ae354d1faffd3a521ac not found: ID does not exist" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.430505 4647 scope.go:117] "RemoveContainer" containerID="f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193" Nov 28 15:49:51 crc kubenswrapper[4647]: E1128 15:49:51.430769 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193\": container with ID starting with f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193 not found: ID does not exist" containerID="f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193" Nov 28 15:49:51 crc kubenswrapper[4647]: I1128 15:49:51.430804 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193"} err="failed to get container status \"f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193\": rpc error: code = NotFound desc = could not find container \"f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193\": container with ID starting with f634a9770e2a0adbdcffd810b7ed3850165926ba4df427109e54f6c78b82e193 not found: ID does not exist" Nov 28 15:49:52 crc kubenswrapper[4647]: I1128 15:49:52.429881 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" path="/var/lib/kubelet/pods/6b578681-44de-4616-99eb-0fc0e8ba3c73/volumes" Nov 28 15:49:58 crc kubenswrapper[4647]: I1128 15:49:58.394704 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:49:58 crc kubenswrapper[4647]: E1128 15:49:58.396797 4647 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:50:12 crc kubenswrapper[4647]: I1128 15:50:12.395500 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:50:12 crc kubenswrapper[4647]: E1128 15:50:12.397557 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:50:24 crc kubenswrapper[4647]: I1128 15:50:24.395898 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:50:24 crc kubenswrapper[4647]: E1128 15:50:24.396867 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:50:38 crc kubenswrapper[4647]: I1128 15:50:38.395368 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:50:38 crc kubenswrapper[4647]: E1128 15:50:38.396707 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.677174 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:50:43 crc kubenswrapper[4647]: E1128 15:50:43.684711 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="extract-utilities" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.684776 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="extract-utilities" Nov 28 15:50:43 crc kubenswrapper[4647]: E1128 15:50:43.684827 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="registry-server" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.684838 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="registry-server" Nov 28 15:50:43 crc kubenswrapper[4647]: E1128 15:50:43.684875 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" 
containerName="extract-content" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.684909 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="extract-content" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.685279 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b578681-44de-4616-99eb-0fc0e8ba3c73" containerName="registry-server" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.687851 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.689735 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.808817 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.809856 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66qhv\" (UniqueName: \"kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.809949 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.912902 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66qhv\" (UniqueName: \"kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.913094 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.913238 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.914192 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " 
pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.914549 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:43 crc kubenswrapper[4647]: I1128 15:50:43.936753 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66qhv\" (UniqueName: \"kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv\") pod \"redhat-operators-5fqgf\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.016573 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.539862 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.917639 4647 generic.go:334] "Generic (PLEG): container finished" podID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerID="56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d" exitCode=0 Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.917692 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerDied","Data":"56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d"} Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.917726 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerStarted","Data":"e853ff8f357ba3458644a30e3de5b20bc8ab9746cfbb4f4fda89794d11ede5d6"} Nov 28 15:50:44 crc kubenswrapper[4647]: I1128 15:50:44.920367 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:50:46 crc kubenswrapper[4647]: I1128 15:50:46.940760 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerStarted","Data":"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa"} Nov 28 15:50:49 crc kubenswrapper[4647]: I1128 15:50:49.969883 4647 generic.go:334] "Generic (PLEG): container finished" podID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerID="7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa" exitCode=0 Nov 28 15:50:49 crc kubenswrapper[4647]: I1128 15:50:49.970185 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerDied","Data":"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa"} Nov 28 15:50:50 crc kubenswrapper[4647]: I1128 15:50:50.981440 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerStarted","Data":"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164"} Nov 28 15:50:51 crc kubenswrapper[4647]: I1128 15:50:51.003404 4647 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5fqgf" podStartSLOduration=2.447936936 podStartE2EDuration="8.003391934s" podCreationTimestamp="2025-11-28 15:50:43 +0000 UTC" firstStartedPulling="2025-11-28 15:50:44.920070215 +0000 UTC m=+1574.767676636" lastFinishedPulling="2025-11-28 15:50:50.475525173 +0000 UTC m=+1580.323131634" observedRunningTime="2025-11-28 15:50:51.001125403 +0000 UTC m=+1580.848731824" watchObservedRunningTime="2025-11-28 15:50:51.003391934 +0000 UTC m=+1580.850998355" Nov 28 15:50:53 crc kubenswrapper[4647]: I1128 15:50:53.394930 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:50:53 crc kubenswrapper[4647]: E1128 15:50:53.395394 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:50:54 crc kubenswrapper[4647]: I1128 15:50:54.017161 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:54 crc kubenswrapper[4647]: I1128 15:50:54.017955 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:50:55 crc kubenswrapper[4647]: I1128 15:50:55.063013 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5fqgf" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="registry-server" probeResult="failure" output=< Nov 28 15:50:55 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 15:50:55 crc kubenswrapper[4647]: > Nov 28 15:51:04 crc kubenswrapper[4647]: I1128 15:51:04.080673 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:51:04 crc kubenswrapper[4647]: I1128 15:51:04.148110 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:51:04 crc kubenswrapper[4647]: I1128 15:51:04.328353 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:51:04 crc kubenswrapper[4647]: I1128 15:51:04.395401 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:51:04 crc kubenswrapper[4647]: E1128 15:51:04.395777 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.123200 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5fqgf" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="registry-server" 
containerID="cri-o://23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164" gracePeriod=2 Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.603133 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.768623 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content\") pod \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.768707 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66qhv\" (UniqueName: \"kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv\") pod \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.768893 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities\") pod \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\" (UID: \"1f8812e8-b52a-48d3-8176-f9c569fafa9c\") " Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.770101 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities" (OuterVolumeSpecName: "utilities") pod "1f8812e8-b52a-48d3-8176-f9c569fafa9c" (UID: "1f8812e8-b52a-48d3-8176-f9c569fafa9c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.781672 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv" (OuterVolumeSpecName: "kube-api-access-66qhv") pod "1f8812e8-b52a-48d3-8176-f9c569fafa9c" (UID: "1f8812e8-b52a-48d3-8176-f9c569fafa9c"). InnerVolumeSpecName "kube-api-access-66qhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.873865 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.873901 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66qhv\" (UniqueName: \"kubernetes.io/projected/1f8812e8-b52a-48d3-8176-f9c569fafa9c-kube-api-access-66qhv\") on node \"crc\" DevicePath \"\"" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.878184 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f8812e8-b52a-48d3-8176-f9c569fafa9c" (UID: "1f8812e8-b52a-48d3-8176-f9c569fafa9c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:51:05 crc kubenswrapper[4647]: I1128 15:51:05.976531 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8812e8-b52a-48d3-8176-f9c569fafa9c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.143012 4647 generic.go:334] "Generic (PLEG): container finished" podID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerID="23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164" exitCode=0 Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.143054 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerDied","Data":"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164"} Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.143081 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5fqgf" event={"ID":"1f8812e8-b52a-48d3-8176-f9c569fafa9c","Type":"ContainerDied","Data":"e853ff8f357ba3458644a30e3de5b20bc8ab9746cfbb4f4fda89794d11ede5d6"} Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.143101 4647 scope.go:117] "RemoveContainer" containerID="23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.143239 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5fqgf" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.183701 4647 scope.go:117] "RemoveContainer" containerID="7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.191180 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.200023 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5fqgf"] Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.218164 4647 scope.go:117] "RemoveContainer" containerID="56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.277847 4647 scope.go:117] "RemoveContainer" containerID="23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164" Nov 28 15:51:06 crc kubenswrapper[4647]: E1128 15:51:06.279799 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164\": container with ID starting with 23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164 not found: ID does not exist" containerID="23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.279837 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164"} err="failed to get container status \"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164\": rpc error: code = NotFound desc = could not find container \"23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164\": container with ID starting with 23f04e2b8a33ac615661338fe4aa837fe5df6e8bef17b15e6c7d4330a7570164 not found: ID does not exist" Nov 28 15:51:06 crc 
kubenswrapper[4647]: I1128 15:51:06.279862 4647 scope.go:117] "RemoveContainer" containerID="7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa" Nov 28 15:51:06 crc kubenswrapper[4647]: E1128 15:51:06.285006 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa\": container with ID starting with 7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa not found: ID does not exist" containerID="7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.285040 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa"} err="failed to get container status \"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa\": rpc error: code = NotFound desc = could not find container \"7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa\": container with ID starting with 7cefe6f8ef1c8439049e9b13dd9976b8be6622ff1767a1d7e23d31cabff807aa not found: ID does not exist" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.285060 4647 scope.go:117] "RemoveContainer" containerID="56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d" Nov 28 15:51:06 crc kubenswrapper[4647]: E1128 15:51:06.287258 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d\": container with ID starting with 56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d not found: ID does not exist" containerID="56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.287290 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d"} err="failed to get container status \"56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d\": rpc error: code = NotFound desc = could not find container \"56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d\": container with ID starting with 56982b38a3ef3c5fae16a3ffcb604d3fc3064eafed976d8972d7d26cfcd5916d not found: ID does not exist" Nov 28 15:51:06 crc kubenswrapper[4647]: I1128 15:51:06.417931 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" path="/var/lib/kubelet/pods/1f8812e8-b52a-48d3-8176-f9c569fafa9c/volumes" Nov 28 15:51:16 crc kubenswrapper[4647]: I1128 15:51:16.396600 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:51:16 crc kubenswrapper[4647]: E1128 15:51:16.397467 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 15:51:30 crc kubenswrapper[4647]: I1128 15:51:30.400624 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" 
Nov 28 15:51:30 crc kubenswrapper[4647]: E1128 15:51:30.401499 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.063947 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-sq882"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.076280 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-pj6vk"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.086298 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-vd5k5"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.094379 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-pj6vk"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.101792 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-sq882"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.109390 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-vd5k5"]
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.415286 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5325b221-c839-4627-ade7-88b5887633c1" path="/var/lib/kubelet/pods/5325b221-c839-4627-ade7-88b5887633c1/volumes"
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.417907 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac" path="/var/lib/kubelet/pods/6a2f1ed3-7cb6-449c-80b5-2e0c0ff874ac/volumes"
Nov 28 15:51:36 crc kubenswrapper[4647]: I1128 15:51:36.420650 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3a08067-d4b3-4e12-a424-b849589289c9" path="/var/lib/kubelet/pods/b3a08067-d4b3-4e12-a424-b849589289c9/volumes"
Nov 28 15:51:44 crc kubenswrapper[4647]: I1128 15:51:44.396292 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:51:44 crc kubenswrapper[4647]: E1128 15:51:44.397390 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.046971 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-b08b-account-create-hqtp6"]
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.058698 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0c68-account-create-pkl88"]
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.071037 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-b08b-account-create-hqtp6"]
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.083178 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-0c68-account-create-pkl88"]
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.417088 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35e79807-4ef8-4edc-ac3c-7d04a71cc82e" path="/var/lib/kubelet/pods/35e79807-4ef8-4edc-ac3c-7d04a71cc82e/volumes"
Nov 28 15:51:46 crc kubenswrapper[4647]: I1128 15:51:46.418122 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90e3a33a-b397-4644-9f1b-89677ee76424" path="/var/lib/kubelet/pods/90e3a33a-b397-4644-9f1b-89677ee76424/volumes"
Nov 28 15:51:47 crc kubenswrapper[4647]: I1128 15:51:47.030821 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-84aa-account-create-g9ndj"]
Nov 28 15:51:47 crc kubenswrapper[4647]: I1128 15:51:47.040280 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-84aa-account-create-g9ndj"]
Nov 28 15:51:48 crc kubenswrapper[4647]: I1128 15:51:48.404352 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6" path="/var/lib/kubelet/pods/a40f21f8-fd75-4ac5-826a-4d7ae9ae14b6/volumes"
Nov 28 15:51:57 crc kubenswrapper[4647]: I1128 15:51:57.395292 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:51:57 crc kubenswrapper[4647]: E1128 15:51:57.396396 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:52:08 crc kubenswrapper[4647]: I1128 15:52:08.395342 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:52:08 crc kubenswrapper[4647]: E1128 15:52:08.396193 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.068802 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-6rfmd"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.083060 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-ftfj5"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.100383 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-t7m57"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.110330 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-6rfmd"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.119530 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-ftfj5"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.127919 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-t7m57"]
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.405133 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75bbefac-ee0e-4ad3-92c2-38c7dc33c733" path="/var/lib/kubelet/pods/75bbefac-ee0e-4ad3-92c2-38c7dc33c733/volumes"
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.406373 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6182ef5-d994-41fe-a7fc-d07b63bb0156" path="/var/lib/kubelet/pods/c6182ef5-d994-41fe-a7fc-d07b63bb0156/volumes"
Nov 28 15:52:12 crc kubenswrapper[4647]: I1128 15:52:12.407903 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e65030f5-e153-41fc-9765-c4d02d353ef1" path="/var/lib/kubelet/pods/e65030f5-e153-41fc-9765-c4d02d353ef1/volumes"
Nov 28 15:52:15 crc kubenswrapper[4647]: I1128 15:52:15.900137 4647 scope.go:117] "RemoveContainer" containerID="b61db8210975510a7c3df628ee1680245e15392c15c0651ad2b6b4632add8eb2"
Nov 28 15:52:15 crc kubenswrapper[4647]: I1128 15:52:15.929352 4647 scope.go:117] "RemoveContainer" containerID="93f224bde4c7f68df896c358b5bc36a3b9fb404a8e2cadbe1cc7018f514d72b7"
Nov 28 15:52:15 crc kubenswrapper[4647]: I1128 15:52:15.979607 4647 scope.go:117] "RemoveContainer" containerID="897e2bda119a05a250874d26620442a69065fe15d09e78a1538848bd8db7696b"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.040429 4647 scope.go:117] "RemoveContainer" containerID="ae0b1117beabadbba219d7813b2dcfd1d39468b97d7554983595089271c05d0e"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.074269 4647 scope.go:117] "RemoveContainer" containerID="b4b033dd682e1f836613580453c6017208b1e48d08782648666fdbb1ba5af303"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.129126 4647 scope.go:117] "RemoveContainer" containerID="3aabf00034f8c42bcc1436c3ea2d289a5955414228a5cffed2dcb0cfbf271987"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.174687 4647 scope.go:117] "RemoveContainer" containerID="f8208c12c3f309859fb2cfcd755392fbafac6502071e44c56fb7a18e9161d5c8"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.213998 4647 scope.go:117] "RemoveContainer" containerID="7cba9509a18cdcb6cb26891a229bb9a6dfbcb94bc2e43777d7b98186bb0c9c05"
Nov 28 15:52:16 crc kubenswrapper[4647]: I1128 15:52:16.236895 4647 scope.go:117] "RemoveContainer" containerID="aa38034078bf7864703a09170c277cc314671f27200cd0251cbcde4ab050e633"
Nov 28 15:52:23 crc kubenswrapper[4647]: I1128 15:52:23.394312 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:52:23 crc kubenswrapper[4647]: E1128 15:52:23.395159 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:52:25 crc kubenswrapper[4647]: I1128 15:52:25.031171 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-45f8-account-create-5nhrv"]
Nov 28 15:52:25 crc kubenswrapper[4647]: I1128 15:52:25.047343 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-4rng7"]
Nov 28 15:52:25 crc kubenswrapper[4647]: I1128 15:52:25.055481 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-45f8-account-create-5nhrv"]
Nov 28 15:52:25 crc kubenswrapper[4647]: I1128 15:52:25.063558 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-4rng7"]
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.043486 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4869-account-create-5n2xv"]
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.052656 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1460-account-create-9fj85"]
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.059426 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1460-account-create-9fj85"]
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.065958 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4869-account-create-5n2xv"]
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.408074 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a202ad93-2e07-4d93-a9db-a5a1b095f22b" path="/var/lib/kubelet/pods/a202ad93-2e07-4d93-a9db-a5a1b095f22b/volumes"
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.409675 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a46198d8-04b6-4efe-8bf9-49a8babad6b4" path="/var/lib/kubelet/pods/a46198d8-04b6-4efe-8bf9-49a8babad6b4/volumes"
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.411127 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4796edf-cb5a-41a8-b06b-25233998f32b" path="/var/lib/kubelet/pods/d4796edf-cb5a-41a8-b06b-25233998f32b/volumes"
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.412558 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e013c7a9-cf78-48c6-bd86-50065e0d2d19" path="/var/lib/kubelet/pods/e013c7a9-cf78-48c6-bd86-50065e0d2d19/volumes"
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.986183 4647 generic.go:334] "Generic (PLEG): container finished" podID="60d6a4cd-44ae-46ff-a980-a81ddab3b98c" containerID="0b3b5e46ff8ee4d783f5e147062d2c49ece474e4c58ed8e0f5d2936831ecf10f" exitCode=0
Nov 28 15:52:26 crc kubenswrapper[4647]: I1128 15:52:26.986223 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" event={"ID":"60d6a4cd-44ae-46ff-a980-a81ddab3b98c","Type":"ContainerDied","Data":"0b3b5e46ff8ee4d783f5e147062d2c49ece474e4c58ed8e0f5d2936831ecf10f"}
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.591231 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s"
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.741028 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key\") pod \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") "
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.741161 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle\") pod \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") "
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.741297 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory\") pod \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") "
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.741335 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86r77\" (UniqueName: \"kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77\") pod \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\" (UID: \"60d6a4cd-44ae-46ff-a980-a81ddab3b98c\") "
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.747970 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "60d6a4cd-44ae-46ff-a980-a81ddab3b98c" (UID: "60d6a4cd-44ae-46ff-a980-a81ddab3b98c"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.748686 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77" (OuterVolumeSpecName: "kube-api-access-86r77") pod "60d6a4cd-44ae-46ff-a980-a81ddab3b98c" (UID: "60d6a4cd-44ae-46ff-a980-a81ddab3b98c"). InnerVolumeSpecName "kube-api-access-86r77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.769082 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "60d6a4cd-44ae-46ff-a980-a81ddab3b98c" (UID: "60d6a4cd-44ae-46ff-a980-a81ddab3b98c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.770087 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory" (OuterVolumeSpecName: "inventory") pod "60d6a4cd-44ae-46ff-a980-a81ddab3b98c" (UID: "60d6a4cd-44ae-46ff-a980-a81ddab3b98c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.848760 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.848805 4647 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.848822 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 15:52:28 crc kubenswrapper[4647]: I1128 15:52:28.848836 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86r77\" (UniqueName: \"kubernetes.io/projected/60d6a4cd-44ae-46ff-a980-a81ddab3b98c-kube-api-access-86r77\") on node \"crc\" DevicePath \"\""
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.014041 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s" event={"ID":"60d6a4cd-44ae-46ff-a980-a81ddab3b98c","Type":"ContainerDied","Data":"bbd1bde571e2d3a5e3ef87c8efc885fde037beafe8dd9b836ecfac3b1524e64f"}
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.014088 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbd1bde571e2d3a5e3ef87c8efc885fde037beafe8dd9b836ecfac3b1524e64f"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.014113 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.125515 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"]
Nov 28 15:52:29 crc kubenswrapper[4647]: E1128 15:52:29.126122 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="extract-utilities"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126147 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="extract-utilities"
Nov 28 15:52:29 crc kubenswrapper[4647]: E1128 15:52:29.126169 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="extract-content"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126178 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="extract-content"
Nov 28 15:52:29 crc kubenswrapper[4647]: E1128 15:52:29.126209 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60d6a4cd-44ae-46ff-a980-a81ddab3b98c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126218 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="60d6a4cd-44ae-46ff-a980-a81ddab3b98c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:52:29 crc kubenswrapper[4647]: E1128 15:52:29.126242 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="registry-server"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126250 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="registry-server"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126480 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8812e8-b52a-48d3-8176-f9c569fafa9c" containerName="registry-server"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.126512 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="60d6a4cd-44ae-46ff-a980-a81ddab3b98c" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.127268 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.129954 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.130612 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.132512 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.133337 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.136852 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"]
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.153828 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.153871 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p55bt\" (UniqueName: \"kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.153913 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.255170 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.255217 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p55bt\" (UniqueName: \"kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.255254 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.264209 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.274016 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.288863 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p55bt\" (UniqueName: \"kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:29 crc kubenswrapper[4647]: I1128 15:52:29.444181 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:52:30 crc kubenswrapper[4647]: I1128 15:52:30.002534 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"]
Nov 28 15:52:30 crc kubenswrapper[4647]: W1128 15:52:30.008179 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa17a444_8971_4590_a7a5_9d303c00b90e.slice/crio-df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538 WatchSource:0}: Error finding container df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538: Status 404 returned error can't find the container with id df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538
Nov 28 15:52:30 crc kubenswrapper[4647]: I1128 15:52:30.026597 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z" event={"ID":"aa17a444-8971-4590-a7a5-9d303c00b90e","Type":"ContainerStarted","Data":"df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538"}
Nov 28 15:52:30 crc kubenswrapper[4647]: I1128 15:52:30.746185 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 15:52:32 crc kubenswrapper[4647]: I1128 15:52:32.046785 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z" event={"ID":"aa17a444-8971-4590-a7a5-9d303c00b90e","Type":"ContainerStarted","Data":"d291199dba7fc8661f6b3a0d71e38eccaec37bd1ac9c21fcc1ea7f0dfc3e4ea7"}
Nov 28 15:52:32 crc kubenswrapper[4647]: I1128 15:52:32.075235 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z" podStartSLOduration=2.346306898 podStartE2EDuration="3.075217759s" podCreationTimestamp="2025-11-28 15:52:29 +0000 UTC" firstStartedPulling="2025-11-28 15:52:30.011769741 +0000 UTC m=+1679.859376172" lastFinishedPulling="2025-11-28 15:52:30.740680602 +0000 UTC m=+1680.588287033" observedRunningTime="2025-11-28 15:52:32.066916499 +0000 UTC m=+1681.914522960" watchObservedRunningTime="2025-11-28 15:52:32.075217759 +0000 UTC m=+1681.922824180"
Nov 28 15:52:35 crc kubenswrapper[4647]: I1128 15:52:35.395114 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:52:35 crc kubenswrapper[4647]: E1128 15:52:35.397083 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:52:45 crc kubenswrapper[4647]: I1128 15:52:45.052324 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-9gx56"]
Nov 28 15:52:45 crc kubenswrapper[4647]: I1128 15:52:45.060038 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-9gx56"]
Nov 28 15:52:46 crc kubenswrapper[4647]: I1128 15:52:46.410682 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5b74ca6-04a9-46a3-8aa2-658580db07c0" path="/var/lib/kubelet/pods/b5b74ca6-04a9-46a3-8aa2-658580db07c0/volumes"
Nov 28 15:52:49 crc kubenswrapper[4647]: I1128 15:52:49.394542 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:52:49 crc kubenswrapper[4647]: E1128 15:52:49.395112 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:01 crc kubenswrapper[4647]: I1128 15:53:01.394398 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:53:01 crc kubenswrapper[4647]: E1128 15:53:01.397160 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:13 crc kubenswrapper[4647]: I1128 15:53:13.394929 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:53:13 crc kubenswrapper[4647]: E1128 15:53:13.396821 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:14 crc kubenswrapper[4647]: I1128 15:53:14.066440 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-s6szl"]
Nov 28 15:53:14 crc kubenswrapper[4647]: I1128 15:53:14.082072 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-s6szl"]
Nov 28 15:53:14 crc kubenswrapper[4647]: I1128 15:53:14.413820 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67922fe2-4c18-4d43-8d80-ede2c34cb2c6" path="/var/lib/kubelet/pods/67922fe2-4c18-4d43-8d80-ede2c34cb2c6/volumes"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.420765 4647 scope.go:117] "RemoveContainer" containerID="e607e8e0690608ba241165256f71c6844d5044b363755b54b79014f43b3031fa"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.448230 4647 scope.go:117] "RemoveContainer" containerID="37ebbc620e01d8d7bd6db309f510c0986df4c9cd46c8e2c170f9bcb9323cb00e"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.485799 4647 scope.go:117] "RemoveContainer" containerID="69ff0e64181b0f15e2178f8d9315a32ec4cfaa7d2bd0e4ce84313b9548ac2bd7"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.544136 4647 scope.go:117] "RemoveContainer" containerID="dabdd852a15e2df9199eb64966465f6cdb6cfedcda7a23116dd81ba11d668988"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.576509 4647 scope.go:117] "RemoveContainer" containerID="125a556b37f54e4b1e278f6754ea18ce3283617383b660855ba48c9868d2ad6b"
Nov 28 15:53:16 crc kubenswrapper[4647]: I1128 15:53:16.613538 4647 scope.go:117] "RemoveContainer" containerID="0d1ba4f54393cc1a7d2d80a35761c9939500729559ff14ca733d9569bb273beb"
Nov 28 15:53:26 crc kubenswrapper[4647]: I1128 15:53:26.394801 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:53:26 crc kubenswrapper[4647]: E1128 15:53:26.395678 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:32 crc kubenswrapper[4647]: I1128 15:53:32.070975 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6k9fd"]
Nov 28 15:53:32 crc kubenswrapper[4647]: I1128 15:53:32.080985 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6k9fd"]
Nov 28 15:53:32 crc kubenswrapper[4647]: I1128 15:53:32.410305 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75b1595c-cca5-4809-94cf-c1c0ec937c27" path="/var/lib/kubelet/pods/75b1595c-cca5-4809-94cf-c1c0ec937c27/volumes"
Nov 28 15:53:33 crc kubenswrapper[4647]: I1128 15:53:33.059678 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-n92kx"]
Nov 28 15:53:33 crc kubenswrapper[4647]: I1128 15:53:33.069910 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-n92kx"]
Nov 28 15:53:34 crc kubenswrapper[4647]: I1128 15:53:34.416083 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c34bccdb-127a-40c0-ac61-6e2f354a6c6d" path="/var/lib/kubelet/pods/c34bccdb-127a-40c0-ac61-6e2f354a6c6d/volumes"
Nov 28 15:53:38 crc kubenswrapper[4647]: I1128 15:53:38.394785 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:53:38 crc kubenswrapper[4647]: E1128 15:53:38.395938 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:41 crc kubenswrapper[4647]: I1128 15:53:41.047691 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-4n48c"]
Nov 28 15:53:41 crc kubenswrapper[4647]: I1128 15:53:41.065827 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-4n48c"]
Nov 28 15:53:42 crc kubenswrapper[4647]: I1128 15:53:42.412397 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb420869-c42b-47a6-8e22-c4e263d9a666" path="/var/lib/kubelet/pods/fb420869-c42b-47a6-8e22-c4e263d9a666/volumes"
Nov 28 15:53:53 crc kubenswrapper[4647]: I1128 15:53:53.394648 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:53:53 crc kubenswrapper[4647]: E1128 15:53:53.395584 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:53:54 crc kubenswrapper[4647]: I1128 15:53:54.049842 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-p265g"]
Nov 28 15:53:54 crc kubenswrapper[4647]: I1128 15:53:54.059747 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-p265g"]
Nov 28 15:53:54 crc kubenswrapper[4647]: I1128 15:53:54.416271 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f54b294-79b2-4097-9011-f094f66cc705" path="/var/lib/kubelet/pods/4f54b294-79b2-4097-9011-f094f66cc705/volumes"
Nov 28 15:54:06 crc kubenswrapper[4647]: I1128 15:54:06.395071 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:54:06 crc kubenswrapper[4647]: E1128 15:54:06.396163 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 15:54:16 crc kubenswrapper[4647]: I1128 15:54:16.789765 4647 scope.go:117] "RemoveContainer" containerID="e703c83e2676b9ada434ba75eb4631cfd27f062cb2124853751aee90a84e4396"
Nov 28 15:54:16 crc kubenswrapper[4647]: I1128 15:54:16.856114 4647 scope.go:117] "RemoveContainer" containerID="2eab7b2bfb275405cd699a6b63cd2f3543dfddc8df6e0b930ae4d2422889ac00"
Nov 28 15:54:16 crc kubenswrapper[4647]: I1128 15:54:16.916108 4647 scope.go:117] "RemoveContainer" containerID="0c7941f64ec343f785e561d3ff7461d27b9638e37faf8497e409296aacfa6710"
Nov 28 15:54:16 crc kubenswrapper[4647]: I1128 15:54:16.959753 4647 scope.go:117] "RemoveContainer" containerID="8677705d8f0bedddf37e0a160a871511a7e394b111b5da45276d87698d685fcb"
Nov 28 15:54:20 crc kubenswrapper[4647]: I1128 15:54:20.400046 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0"
Nov 28 15:54:21 crc kubenswrapper[4647]: I1128 15:54:21.302238 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d"}
Nov 28 15:54:23 crc kubenswrapper[4647]: I1128 15:54:23.340103 4647 generic.go:334] "Generic (PLEG): container finished" podID="aa17a444-8971-4590-a7a5-9d303c00b90e" containerID="d291199dba7fc8661f6b3a0d71e38eccaec37bd1ac9c21fcc1ea7f0dfc3e4ea7" exitCode=0
Nov 28 15:54:23 crc kubenswrapper[4647]: I1128 15:54:23.340285 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z" event={"ID":"aa17a444-8971-4590-a7a5-9d303c00b90e","Type":"ContainerDied","Data":"d291199dba7fc8661f6b3a0d71e38eccaec37bd1ac9c21fcc1ea7f0dfc3e4ea7"}
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.747126 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.904563 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p55bt\" (UniqueName: \"kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt\") pod \"aa17a444-8971-4590-a7a5-9d303c00b90e\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") "
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.904835 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key\") pod \"aa17a444-8971-4590-a7a5-9d303c00b90e\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") "
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.904916 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory\") pod \"aa17a444-8971-4590-a7a5-9d303c00b90e\" (UID: \"aa17a444-8971-4590-a7a5-9d303c00b90e\") "
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.919430 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt" (OuterVolumeSpecName: "kube-api-access-p55bt") pod "aa17a444-8971-4590-a7a5-9d303c00b90e" (UID: "aa17a444-8971-4590-a7a5-9d303c00b90e"). InnerVolumeSpecName "kube-api-access-p55bt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.934186 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory" (OuterVolumeSpecName: "inventory") pod "aa17a444-8971-4590-a7a5-9d303c00b90e" (UID: "aa17a444-8971-4590-a7a5-9d303c00b90e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:54:24 crc kubenswrapper[4647]: I1128 15:54:24.942877 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "aa17a444-8971-4590-a7a5-9d303c00b90e" (UID: "aa17a444-8971-4590-a7a5-9d303c00b90e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.007765 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p55bt\" (UniqueName: \"kubernetes.io/projected/aa17a444-8971-4590-a7a5-9d303c00b90e-kube-api-access-p55bt\") on node \"crc\" DevicePath \"\""
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.009536 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.009584 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/aa17a444-8971-4590-a7a5-9d303c00b90e-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.356867 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z" event={"ID":"aa17a444-8971-4590-a7a5-9d303c00b90e","Type":"ContainerDied","Data":"df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538"}
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.356913 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df70356f9f300dfa9c1e194b22c6fdc78b90a8c21ebf8b94f7494d1966ebd538"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.356958 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.465342 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"]
Nov 28 15:54:25 crc kubenswrapper[4647]: E1128 15:54:25.465851 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa17a444-8971-4590-a7a5-9d303c00b90e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.465873 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa17a444-8971-4590-a7a5-9d303c00b90e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.466068 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa17a444-8971-4590-a7a5-9d303c00b90e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.466780 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.469234 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.469473 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.469988 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.471614 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"]
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.474269 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.620997 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.621067 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9c8z\" (UniqueName: \"kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.621223 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.722605 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.722946 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9c8z\" (UniqueName: \"kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"
Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.723064 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key\")
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.727628 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.727840 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.748953 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9c8z\" (UniqueName: \"kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-flpg5\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:54:25 crc kubenswrapper[4647]: I1128 15:54:25.783794 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:54:26 crc kubenswrapper[4647]: I1128 15:54:26.379943 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5"] Nov 28 15:54:27 crc kubenswrapper[4647]: I1128 15:54:27.377380 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" event={"ID":"77715da8-feee-451f-a972-a2e52884582a","Type":"ContainerStarted","Data":"6215aedd36fda5dba25b5004685fdbf62a3fded0cf3072da3f03f45a7a236057"} Nov 28 15:54:28 crc kubenswrapper[4647]: I1128 15:54:28.390990 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" event={"ID":"77715da8-feee-451f-a972-a2e52884582a","Type":"ContainerStarted","Data":"528ee282214f6089feec39c712a210cf693e9780ee63355c4ea16a937f71b057"} Nov 28 15:54:28 crc kubenswrapper[4647]: I1128 15:54:28.415130 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" podStartSLOduration=2.8161480169999997 podStartE2EDuration="3.415112117s" podCreationTimestamp="2025-11-28 15:54:25 +0000 UTC" firstStartedPulling="2025-11-28 15:54:26.386837803 +0000 UTC m=+1796.234444214" lastFinishedPulling="2025-11-28 15:54:26.985801893 +0000 UTC m=+1796.833408314" observedRunningTime="2025-11-28 15:54:28.406476577 +0000 UTC m=+1798.254083008" watchObservedRunningTime="2025-11-28 15:54:28.415112117 +0000 UTC m=+1798.262718548" Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.058816 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-bkg6q"] Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.069012 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-api-db-create-7tcfv"] Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.078913 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-6v9rt"] Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.089364 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-6v9rt"] Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.097270 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-bkg6q"] Nov 28 15:54:35 crc kubenswrapper[4647]: I1128 15:54:35.104289 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-7tcfv"] Nov 28 15:54:36 crc kubenswrapper[4647]: I1128 15:54:36.411483 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4148fe73-7269-47f4-9c12-60ec17192441" path="/var/lib/kubelet/pods/4148fe73-7269-47f4-9c12-60ec17192441/volumes" Nov 28 15:54:36 crc kubenswrapper[4647]: I1128 15:54:36.413179 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e70ac4-fcb7-4bce-a0b2-5c148e71514f" path="/var/lib/kubelet/pods/63e70ac4-fcb7-4bce-a0b2-5c148e71514f/volumes" Nov 28 15:54:36 crc kubenswrapper[4647]: I1128 15:54:36.414029 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c2c650d-ce2e-4087-bb19-f199ed233bcf" path="/var/lib/kubelet/pods/9c2c650d-ce2e-4087-bb19-f199ed233bcf/volumes" Nov 28 15:54:47 crc kubenswrapper[4647]: I1128 15:54:47.045602 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7291-account-create-7r4r7"] Nov 28 15:54:47 crc kubenswrapper[4647]: I1128 15:54:47.054156 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7291-account-create-7r4r7"] Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.033320 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-f1f8-account-create-cxfkp"] Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.042749 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-662e-account-create-rqmwd"] Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.056603 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-662e-account-create-rqmwd"] Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.063536 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-f1f8-account-create-cxfkp"] Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.412557 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="691a522b-98e3-4e61-85e1-5f66167bd77e" path="/var/lib/kubelet/pods/691a522b-98e3-4e61-85e1-5f66167bd77e/volumes" Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.413259 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b9747f-3a90-42ca-885e-119cdcb53280" path="/var/lib/kubelet/pods/a5b9747f-3a90-42ca-885e-119cdcb53280/volumes" Nov 28 15:54:48 crc kubenswrapper[4647]: I1128 15:54:48.413960 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6933167-f4cc-48bf-a880-f05c6f1dee6e" path="/var/lib/kubelet/pods/d6933167-f4cc-48bf-a880-f05c6f1dee6e/volumes" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.106206 4647 scope.go:117] "RemoveContainer" containerID="ffa42b1cd2f4d658abc916d4d2b3df0217254fc3484da9fbfe7abd0c5f337b64" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.140839 4647 scope.go:117] "RemoveContainer" 
containerID="8014c989c1857fa51b69435591fd0d2840fd173c803ad8365d77085a9f9975d3" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.210386 4647 scope.go:117] "RemoveContainer" containerID="75b9c0aba4dd0443d7b9692e3f7888f1b05a8e989ad81cb621c6e5dca0c83caf" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.244606 4647 scope.go:117] "RemoveContainer" containerID="d0abd98ff788e15b9b1243ec0a4ade0ffe868ae935541fe896a4121588b7b697" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.293597 4647 scope.go:117] "RemoveContainer" containerID="79c16e47fd6e3bd5826af7ddd6f8b6f9693e3583e2e655006211cb03dc7a2ecc" Nov 28 15:55:17 crc kubenswrapper[4647]: I1128 15:55:17.332896 4647 scope.go:117] "RemoveContainer" containerID="a62d7c5eea082182da67068ccddd5f0c14188f5c7d99fc1ff5be4a744c0d6dce" Nov 28 15:55:34 crc kubenswrapper[4647]: I1128 15:55:34.068652 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-78mbd"] Nov 28 15:55:34 crc kubenswrapper[4647]: I1128 15:55:34.080990 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-78mbd"] Nov 28 15:55:34 crc kubenswrapper[4647]: I1128 15:55:34.410733 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="575d03bd-0f8a-4f54-b6cb-2fdd4b48365b" path="/var/lib/kubelet/pods/575d03bd-0f8a-4f54-b6cb-2fdd4b48365b/volumes" Nov 28 15:55:50 crc kubenswrapper[4647]: I1128 15:55:50.326141 4647 generic.go:334] "Generic (PLEG): container finished" podID="77715da8-feee-451f-a972-a2e52884582a" containerID="528ee282214f6089feec39c712a210cf693e9780ee63355c4ea16a937f71b057" exitCode=0 Nov 28 15:55:50 crc kubenswrapper[4647]: I1128 15:55:50.326216 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" event={"ID":"77715da8-feee-451f-a972-a2e52884582a","Type":"ContainerDied","Data":"528ee282214f6089feec39c712a210cf693e9780ee63355c4ea16a937f71b057"} Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.785212 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.951612 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9c8z\" (UniqueName: \"kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z\") pod \"77715da8-feee-451f-a972-a2e52884582a\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.952071 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key\") pod \"77715da8-feee-451f-a972-a2e52884582a\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.952349 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory\") pod \"77715da8-feee-451f-a972-a2e52884582a\" (UID: \"77715da8-feee-451f-a972-a2e52884582a\") " Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.958629 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z" (OuterVolumeSpecName: "kube-api-access-t9c8z") pod "77715da8-feee-451f-a972-a2e52884582a" (UID: "77715da8-feee-451f-a972-a2e52884582a"). InnerVolumeSpecName "kube-api-access-t9c8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.988507 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory" (OuterVolumeSpecName: "inventory") pod "77715da8-feee-451f-a972-a2e52884582a" (UID: "77715da8-feee-451f-a972-a2e52884582a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:55:51 crc kubenswrapper[4647]: I1128 15:55:51.999661 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "77715da8-feee-451f-a972-a2e52884582a" (UID: "77715da8-feee-451f-a972-a2e52884582a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.056377 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.056430 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9c8z\" (UniqueName: \"kubernetes.io/projected/77715da8-feee-451f-a972-a2e52884582a-kube-api-access-t9c8z\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.056443 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77715da8-feee-451f-a972-a2e52884582a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.355062 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" event={"ID":"77715da8-feee-451f-a972-a2e52884582a","Type":"ContainerDied","Data":"6215aedd36fda5dba25b5004685fdbf62a3fded0cf3072da3f03f45a7a236057"} Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.355105 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6215aedd36fda5dba25b5004685fdbf62a3fded0cf3072da3f03f45a7a236057" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.355195 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-flpg5" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.476678 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv"] Nov 28 15:55:52 crc kubenswrapper[4647]: E1128 15:55:52.477064 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77715da8-feee-451f-a972-a2e52884582a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.477080 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="77715da8-feee-451f-a972-a2e52884582a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.477262 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="77715da8-feee-451f-a972-a2e52884582a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.477917 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.482280 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.482318 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.482698 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.482926 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.503072 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv"] Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.567452 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.567601 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.567771 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k4m5\" (UniqueName: \"kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.669264 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.669369 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.669536 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k4m5\" (UniqueName: \"kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.674994 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.675902 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.689946 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k4m5\" (UniqueName: \"kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:52 crc kubenswrapper[4647]: I1128 15:55:52.802040 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:55:53 crc kubenswrapper[4647]: I1128 15:55:53.357061 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:55:53 crc kubenswrapper[4647]: I1128 15:55:53.364722 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv"] Nov 28 15:55:53 crc kubenswrapper[4647]: I1128 15:55:53.368181 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" event={"ID":"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5","Type":"ContainerStarted","Data":"f5c6a039061a432ff04f9841c212227d9381edbf3c447b83607cae045c5c4d86"} Nov 28 15:55:54 crc kubenswrapper[4647]: I1128 15:55:54.383024 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" event={"ID":"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5","Type":"ContainerStarted","Data":"db6c9a309c94cede7b5cc73922aa26e9dbc06a209e23de061402143bab235c3d"} Nov 28 15:55:54 crc kubenswrapper[4647]: I1128 15:55:54.411093 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" podStartSLOduration=1.783335168 podStartE2EDuration="2.411065693s" podCreationTimestamp="2025-11-28 15:55:52 +0000 UTC" firstStartedPulling="2025-11-28 15:55:53.356816935 +0000 UTC m=+1883.204423366" lastFinishedPulling="2025-11-28 15:55:53.98454747 +0000 UTC m=+1883.832153891" observedRunningTime="2025-11-28 15:55:54.409731958 +0000 UTC m=+1884.257338419" watchObservedRunningTime="2025-11-28 15:55:54.411065693 +0000 UTC m=+1884.258672144" Nov 28 15:56:00 crc kubenswrapper[4647]: I1128 15:56:00.461979 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" containerID="db6c9a309c94cede7b5cc73922aa26e9dbc06a209e23de061402143bab235c3d" exitCode=0 Nov 28 15:56:00 crc kubenswrapper[4647]: I1128 15:56:00.462090 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" event={"ID":"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5","Type":"ContainerDied","Data":"db6c9a309c94cede7b5cc73922aa26e9dbc06a209e23de061402143bab235c3d"} Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.045610 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.073150 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-26mgb"] Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.086488 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-26mgb"] Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.183803 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k4m5\" (UniqueName: \"kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5\") pod \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.183885 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory\") pod \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.184113 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key\") pod \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\" (UID: \"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5\") " Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.192326 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5" (OuterVolumeSpecName: "kube-api-access-9k4m5") pod "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" (UID: "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5"). InnerVolumeSpecName "kube-api-access-9k4m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.218232 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" (UID: "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.229806 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory" (OuterVolumeSpecName: "inventory") pod "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" (UID: "123e6be6-0c87-4a2b-8f94-ae8207ccbaa5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.287734 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k4m5\" (UniqueName: \"kubernetes.io/projected/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-kube-api-access-9k4m5\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.287789 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.287805 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/123e6be6-0c87-4a2b-8f94-ae8207ccbaa5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.415058 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cba83c78-a603-47a1-89ff-09a1f68196aa" path="/var/lib/kubelet/pods/cba83c78-a603-47a1-89ff-09a1f68196aa/volumes" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.486130 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" event={"ID":"123e6be6-0c87-4a2b-8f94-ae8207ccbaa5","Type":"ContainerDied","Data":"f5c6a039061a432ff04f9841c212227d9381edbf3c447b83607cae045c5c4d86"} Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.486197 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5c6a039061a432ff04f9841c212227d9381edbf3c447b83607cae045c5c4d86" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.486543 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.571835 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp"] Nov 28 15:56:02 crc kubenswrapper[4647]: E1128 15:56:02.572547 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.572568 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.572756 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="123e6be6-0c87-4a2b-8f94-ae8207ccbaa5" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.573535 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.578994 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.580142 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.580903 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.587496 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.590869 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp"] Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.697194 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.697279 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.697382 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dlvk\" (UniqueName: \"kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.800333 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dlvk\" (UniqueName: \"kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.800496 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.800545 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: 
\"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.811295 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.812015 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.820733 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dlvk\" (UniqueName: \"kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-nklbp\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:02 crc kubenswrapper[4647]: I1128 15:56:02.891673 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:03 crc kubenswrapper[4647]: I1128 15:56:03.116291 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xjx2f"] Nov 28 15:56:03 crc kubenswrapper[4647]: I1128 15:56:03.143842 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xjx2f"] Nov 28 15:56:03 crc kubenswrapper[4647]: I1128 15:56:03.586640 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp"] Nov 28 15:56:03 crc kubenswrapper[4647]: W1128 15:56:03.593169 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbde4c89_6c68_4422_931b_94507dc5376d.slice/crio-af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629 WatchSource:0}: Error finding container af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629: Status 404 returned error can't find the container with id af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629 Nov 28 15:56:04 crc kubenswrapper[4647]: I1128 15:56:04.416962 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cd3da5e-0deb-4634-ac2e-24f2e31088c2" path="/var/lib/kubelet/pods/2cd3da5e-0deb-4634-ac2e-24f2e31088c2/volumes" Nov 28 15:56:04 crc kubenswrapper[4647]: I1128 15:56:04.530040 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" event={"ID":"cbde4c89-6c68-4422-931b-94507dc5376d","Type":"ContainerStarted","Data":"af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629"} Nov 28 15:56:05 crc kubenswrapper[4647]: I1128 15:56:05.539269 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" 
event={"ID":"cbde4c89-6c68-4422-931b-94507dc5376d","Type":"ContainerStarted","Data":"5016da05146fa196220f28ef70d116fdad9e913c3de26a2c0e70c8b2f2022289"} Nov 28 15:56:05 crc kubenswrapper[4647]: I1128 15:56:05.568267 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" podStartSLOduration=2.921306645 podStartE2EDuration="3.56824256s" podCreationTimestamp="2025-11-28 15:56:02 +0000 UTC" firstStartedPulling="2025-11-28 15:56:03.595892064 +0000 UTC m=+1893.443498495" lastFinishedPulling="2025-11-28 15:56:04.242827969 +0000 UTC m=+1894.090434410" observedRunningTime="2025-11-28 15:56:05.560682629 +0000 UTC m=+1895.408289060" watchObservedRunningTime="2025-11-28 15:56:05.56824256 +0000 UTC m=+1895.415848991" Nov 28 15:56:17 crc kubenswrapper[4647]: I1128 15:56:17.514748 4647 scope.go:117] "RemoveContainer" containerID="e7c368331fc1653673540fb85a4dfb18a7e8e4f5d3749a91d4c7184e2b405518" Nov 28 15:56:17 crc kubenswrapper[4647]: I1128 15:56:17.583639 4647 scope.go:117] "RemoveContainer" containerID="6aa6bd29accae665032efa8886f0ec5db3dad456a5157940edb15e3ba07f0b6f" Nov 28 15:56:17 crc kubenswrapper[4647]: I1128 15:56:17.643437 4647 scope.go:117] "RemoveContainer" containerID="f3fb184438fcc931b389fcb7364228a987e5beeba81354ecd3e7d1020ce7821e" Nov 28 15:56:44 crc kubenswrapper[4647]: I1128 15:56:44.730628 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-2csd4" podUID="8ee0a7ea-967a-457c-9d3b-1eb46c99b719" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:56:44 crc kubenswrapper[4647]: I1128 15:56:44.730638 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-2csd4" podUID="8ee0a7ea-967a-457c-9d3b-1eb46c99b719" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:56:45 crc kubenswrapper[4647]: I1128 15:56:45.053886 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-rn7j8"] Nov 28 15:56:45 crc kubenswrapper[4647]: I1128 15:56:45.070393 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-rn7j8"] Nov 28 15:56:46 crc kubenswrapper[4647]: I1128 15:56:46.421516 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a" path="/var/lib/kubelet/pods/6ed4d3cb-26ae-46d0-83f7-8ce669d9fc3a/volumes" Nov 28 15:56:47 crc kubenswrapper[4647]: I1128 15:56:47.022857 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:56:47 crc kubenswrapper[4647]: I1128 15:56:47.022944 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:56:55 crc kubenswrapper[4647]: I1128 15:56:55.122407 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="cbde4c89-6c68-4422-931b-94507dc5376d" containerID="5016da05146fa196220f28ef70d116fdad9e913c3de26a2c0e70c8b2f2022289" exitCode=0 Nov 28 15:56:55 crc kubenswrapper[4647]: I1128 15:56:55.122676 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" event={"ID":"cbde4c89-6c68-4422-931b-94507dc5376d","Type":"ContainerDied","Data":"5016da05146fa196220f28ef70d116fdad9e913c3de26a2c0e70c8b2f2022289"} Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.646876 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.816914 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory\") pod \"cbde4c89-6c68-4422-931b-94507dc5376d\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.817936 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key\") pod \"cbde4c89-6c68-4422-931b-94507dc5376d\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.818040 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dlvk\" (UniqueName: \"kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk\") pod \"cbde4c89-6c68-4422-931b-94507dc5376d\" (UID: \"cbde4c89-6c68-4422-931b-94507dc5376d\") " Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.828848 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk" (OuterVolumeSpecName: "kube-api-access-7dlvk") pod "cbde4c89-6c68-4422-931b-94507dc5376d" (UID: "cbde4c89-6c68-4422-931b-94507dc5376d"). InnerVolumeSpecName "kube-api-access-7dlvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.864525 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cbde4c89-6c68-4422-931b-94507dc5376d" (UID: "cbde4c89-6c68-4422-931b-94507dc5376d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.874793 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory" (OuterVolumeSpecName: "inventory") pod "cbde4c89-6c68-4422-931b-94507dc5376d" (UID: "cbde4c89-6c68-4422-931b-94507dc5376d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.922041 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.922081 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbde4c89-6c68-4422-931b-94507dc5376d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:56 crc kubenswrapper[4647]: I1128 15:56:56.922096 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dlvk\" (UniqueName: \"kubernetes.io/projected/cbde4c89-6c68-4422-931b-94507dc5376d-kube-api-access-7dlvk\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.144612 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" event={"ID":"cbde4c89-6c68-4422-931b-94507dc5376d","Type":"ContainerDied","Data":"af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629"} Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.144681 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af67714fdbeba4cb17fc2e7e9364fb3140b8080a12a2efd9e9347c0edeefc629" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.144698 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-nklbp" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.282916 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb"] Nov 28 15:56:57 crc kubenswrapper[4647]: E1128 15:56:57.283305 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbde4c89-6c68-4422-931b-94507dc5376d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.283322 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbde4c89-6c68-4422-931b-94507dc5376d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.283562 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbde4c89-6c68-4422-931b-94507dc5376d" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.284139 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.286757 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.288057 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.292165 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.298255 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.300647 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb"] Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.433533 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.433627 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.433714 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cndcw\" (UniqueName: \"kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.535870 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.536853 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.536967 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cndcw\" (UniqueName: \"kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" 
(UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.541396 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.543047 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.566550 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cndcw\" (UniqueName: \"kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:57 crc kubenswrapper[4647]: I1128 15:56:57.602084 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:56:58 crc kubenswrapper[4647]: I1128 15:56:58.097765 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb"] Nov 28 15:56:58 crc kubenswrapper[4647]: I1128 15:56:58.165182 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" event={"ID":"a8d3c439-b6ee-42bd-96d2-eb725c996b97","Type":"ContainerStarted","Data":"acc97fc6c2288fc681880773a63f6848f829b5260c1415a74399e1fdb77ac14a"} Nov 28 15:56:59 crc kubenswrapper[4647]: I1128 15:56:59.176221 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" event={"ID":"a8d3c439-b6ee-42bd-96d2-eb725c996b97","Type":"ContainerStarted","Data":"cf59c383a69c74ee1688e7f4416bcf54fc7501db927aacfd6a3129dc94dc7932"} Nov 28 15:56:59 crc kubenswrapper[4647]: I1128 15:56:59.198251 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" podStartSLOduration=1.641081275 podStartE2EDuration="2.198225732s" podCreationTimestamp="2025-11-28 15:56:57 +0000 UTC" firstStartedPulling="2025-11-28 15:56:58.126338544 +0000 UTC m=+1947.973944965" lastFinishedPulling="2025-11-28 15:56:58.683482991 +0000 UTC m=+1948.531089422" observedRunningTime="2025-11-28 15:56:59.192745316 +0000 UTC m=+1949.040351737" watchObservedRunningTime="2025-11-28 15:56:59.198225732 +0000 UTC m=+1949.045832163" Nov 28 15:57:17 crc kubenswrapper[4647]: I1128 15:57:17.022559 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:57:17 crc kubenswrapper[4647]: I1128 15:57:17.023275 
4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:57:17 crc kubenswrapper[4647]: I1128 15:57:17.775273 4647 scope.go:117] "RemoveContainer" containerID="f2fa85d4ce8112b42f15c8b82ea2fb49518875673410fc61a22c12326647d965" Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.022599 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.024076 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.024129 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.024952 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.025016 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d" gracePeriod=600 Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.672241 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d" exitCode=0 Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.672289 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d"} Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.672941 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"} Nov 28 15:57:47 crc kubenswrapper[4647]: I1128 15:57:47.672988 4647 scope.go:117] "RemoveContainer" containerID="09e3732d9a915841d0185dc7ba36f0a0f3e631b58f3ab0d5f6822050f2ab26b0" Nov 28 15:58:05 crc kubenswrapper[4647]: I1128 15:58:05.849370 4647 generic.go:334] "Generic (PLEG): container finished" 
podID="a8d3c439-b6ee-42bd-96d2-eb725c996b97" containerID="cf59c383a69c74ee1688e7f4416bcf54fc7501db927aacfd6a3129dc94dc7932" exitCode=0 Nov 28 15:58:05 crc kubenswrapper[4647]: I1128 15:58:05.849505 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" event={"ID":"a8d3c439-b6ee-42bd-96d2-eb725c996b97","Type":"ContainerDied","Data":"cf59c383a69c74ee1688e7f4416bcf54fc7501db927aacfd6a3129dc94dc7932"} Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.294598 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.344256 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cndcw\" (UniqueName: \"kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw\") pod \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.344478 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory\") pod \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.344533 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key\") pod \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\" (UID: \"a8d3c439-b6ee-42bd-96d2-eb725c996b97\") " Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.354456 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw" (OuterVolumeSpecName: "kube-api-access-cndcw") pod "a8d3c439-b6ee-42bd-96d2-eb725c996b97" (UID: "a8d3c439-b6ee-42bd-96d2-eb725c996b97"). InnerVolumeSpecName "kube-api-access-cndcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.387529 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory" (OuterVolumeSpecName: "inventory") pod "a8d3c439-b6ee-42bd-96d2-eb725c996b97" (UID: "a8d3c439-b6ee-42bd-96d2-eb725c996b97"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.401631 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a8d3c439-b6ee-42bd-96d2-eb725c996b97" (UID: "a8d3c439-b6ee-42bd-96d2-eb725c996b97"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.448905 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cndcw\" (UniqueName: \"kubernetes.io/projected/a8d3c439-b6ee-42bd-96d2-eb725c996b97-kube-api-access-cndcw\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.449055 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.449073 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8d3c439-b6ee-42bd-96d2-eb725c996b97-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.874967 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" event={"ID":"a8d3c439-b6ee-42bd-96d2-eb725c996b97","Type":"ContainerDied","Data":"acc97fc6c2288fc681880773a63f6848f829b5260c1415a74399e1fdb77ac14a"} Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.875006 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="acc97fc6c2288fc681880773a63f6848f829b5260c1415a74399e1fdb77ac14a" Nov 28 15:58:07 crc kubenswrapper[4647]: I1128 15:58:07.875063 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.019016 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-k8pmm"] Nov 28 15:58:08 crc kubenswrapper[4647]: E1128 15:58:08.019719 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8d3c439-b6ee-42bd-96d2-eb725c996b97" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.019750 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8d3c439-b6ee-42bd-96d2-eb725c996b97" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.020179 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8d3c439-b6ee-42bd-96d2-eb725c996b97" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.021323 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.024208 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.024269 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.024564 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.024837 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.030733 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-k8pmm"] Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.066438 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.066538 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw8x7\" (UniqueName: \"kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.066561 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.168625 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw8x7\" (UniqueName: \"kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.168896 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.169108 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc 
kubenswrapper[4647]: I1128 15:58:08.177396 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.177396 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.190678 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw8x7\" (UniqueName: \"kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7\") pod \"ssh-known-hosts-edpm-deployment-k8pmm\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.347373 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:08 crc kubenswrapper[4647]: I1128 15:58:08.980306 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-k8pmm"] Nov 28 15:58:09 crc kubenswrapper[4647]: I1128 15:58:09.900842 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" event={"ID":"e27e0d42-24a3-447f-aa49-fc305e1253c0","Type":"ContainerStarted","Data":"f32ae7de426f88ee9a12804fdb2dfe13929f7783dae18bc0f6df920ffdffdc11"} Nov 28 15:58:09 crc kubenswrapper[4647]: I1128 15:58:09.901598 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" event={"ID":"e27e0d42-24a3-447f-aa49-fc305e1253c0","Type":"ContainerStarted","Data":"748d0c8d29fc30a02b101b8eb7c0eec9025692a245600155975336874db666ea"} Nov 28 15:58:09 crc kubenswrapper[4647]: I1128 15:58:09.927002 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" podStartSLOduration=2.402840834 podStartE2EDuration="2.9269734s" podCreationTimestamp="2025-11-28 15:58:07 +0000 UTC" firstStartedPulling="2025-11-28 15:58:08.996076937 +0000 UTC m=+2018.843683358" lastFinishedPulling="2025-11-28 15:58:09.520209473 +0000 UTC m=+2019.367815924" observedRunningTime="2025-11-28 15:58:09.91797151 +0000 UTC m=+2019.765577991" watchObservedRunningTime="2025-11-28 15:58:09.9269734 +0000 UTC m=+2019.774579861" Nov 28 15:58:17 crc kubenswrapper[4647]: I1128 15:58:17.993915 4647 generic.go:334] "Generic (PLEG): container finished" podID="e27e0d42-24a3-447f-aa49-fc305e1253c0" containerID="f32ae7de426f88ee9a12804fdb2dfe13929f7783dae18bc0f6df920ffdffdc11" exitCode=0 Nov 28 15:58:17 crc kubenswrapper[4647]: I1128 15:58:17.994010 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" event={"ID":"e27e0d42-24a3-447f-aa49-fc305e1253c0","Type":"ContainerDied","Data":"f32ae7de426f88ee9a12804fdb2dfe13929f7783dae18bc0f6df920ffdffdc11"} Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.469892 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.519882 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam\") pod \"e27e0d42-24a3-447f-aa49-fc305e1253c0\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.519984 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0\") pod \"e27e0d42-24a3-447f-aa49-fc305e1253c0\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.520053 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw8x7\" (UniqueName: \"kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7\") pod \"e27e0d42-24a3-447f-aa49-fc305e1253c0\" (UID: \"e27e0d42-24a3-447f-aa49-fc305e1253c0\") " Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.529864 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7" (OuterVolumeSpecName: "kube-api-access-bw8x7") pod "e27e0d42-24a3-447f-aa49-fc305e1253c0" (UID: "e27e0d42-24a3-447f-aa49-fc305e1253c0"). InnerVolumeSpecName "kube-api-access-bw8x7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.558488 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "e27e0d42-24a3-447f-aa49-fc305e1253c0" (UID: "e27e0d42-24a3-447f-aa49-fc305e1253c0"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.560586 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e27e0d42-24a3-447f-aa49-fc305e1253c0" (UID: "e27e0d42-24a3-447f-aa49-fc305e1253c0"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.622805 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.622836 4647 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e27e0d42-24a3-447f-aa49-fc305e1253c0-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.622852 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw8x7\" (UniqueName: \"kubernetes.io/projected/e27e0d42-24a3-447f-aa49-fc305e1253c0-kube-api-access-bw8x7\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.872905 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:19 crc kubenswrapper[4647]: E1128 15:58:19.873392 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e27e0d42-24a3-447f-aa49-fc305e1253c0" containerName="ssh-known-hosts-edpm-deployment" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.873408 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="e27e0d42-24a3-447f-aa49-fc305e1253c0" containerName="ssh-known-hosts-edpm-deployment" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.873739 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="e27e0d42-24a3-447f-aa49-fc305e1253c0" containerName="ssh-known-hosts-edpm-deployment" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.875556 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.887941 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.929377 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.929555 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:19 crc kubenswrapper[4647]: I1128 15:58:19.929589 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wd4k\" (UniqueName: \"kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.016869 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" event={"ID":"e27e0d42-24a3-447f-aa49-fc305e1253c0","Type":"ContainerDied","Data":"748d0c8d29fc30a02b101b8eb7c0eec9025692a245600155975336874db666ea"} Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.016918 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="748d0c8d29fc30a02b101b8eb7c0eec9025692a245600155975336874db666ea" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.016988 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-k8pmm" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.031079 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.031116 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wd4k\" (UniqueName: \"kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.031220 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.031724 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.032073 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.055710 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wd4k\" (UniqueName: \"kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k\") pod \"redhat-marketplace-4ts9q\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.140354 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh"] Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.141550 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.143523 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.143771 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.145935 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.146194 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.153372 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh"] Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.195820 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.235463 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.235617 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztv7h\" (UniqueName: \"kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.235693 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.338391 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztv7h\" (UniqueName: \"kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.338471 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.338542 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.343148 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.349510 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.359493 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztv7h\" (UniqueName: \"kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-tkvrh\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.459149 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:20 crc kubenswrapper[4647]: I1128 15:58:20.513237 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:21 crc kubenswrapper[4647]: I1128 15:58:21.027029 4647 generic.go:334] "Generic (PLEG): container finished" podID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerID="61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796" exitCode=0 Nov 28 15:58:21 crc kubenswrapper[4647]: I1128 15:58:21.027104 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerDied","Data":"61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796"} Nov 28 15:58:21 crc kubenswrapper[4647]: I1128 15:58:21.027288 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerStarted","Data":"8adf7b31e3408803bb16604291508f65a2135e7f30a35abcba0692b9cfad9043"} Nov 28 15:58:21 crc kubenswrapper[4647]: W1128 15:58:21.037985 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod965d18f9_ce83_44f5_8ec7_4b13eefa7e30.slice/crio-0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e WatchSource:0}: Error finding container 0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e: Status 404 returned error can't find the container with id 0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e Nov 28 15:58:21 crc kubenswrapper[4647]: I1128 15:58:21.043474 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh"] Nov 28 15:58:22 crc kubenswrapper[4647]: I1128 15:58:22.046856 
4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerStarted","Data":"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1"} Nov 28 15:58:22 crc kubenswrapper[4647]: I1128 15:58:22.055031 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" event={"ID":"965d18f9-ce83-44f5-8ec7-4b13eefa7e30","Type":"ContainerStarted","Data":"3c1b62827f23121a3f0575fc34af4060a59ead3cb998b1896922dbd55c002166"} Nov 28 15:58:22 crc kubenswrapper[4647]: I1128 15:58:22.055079 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" event={"ID":"965d18f9-ce83-44f5-8ec7-4b13eefa7e30","Type":"ContainerStarted","Data":"0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e"} Nov 28 15:58:22 crc kubenswrapper[4647]: I1128 15:58:22.100474 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" podStartSLOduration=1.617681135 podStartE2EDuration="2.100456513s" podCreationTimestamp="2025-11-28 15:58:20 +0000 UTC" firstStartedPulling="2025-11-28 15:58:21.044549435 +0000 UTC m=+2030.892155856" lastFinishedPulling="2025-11-28 15:58:21.527324793 +0000 UTC m=+2031.374931234" observedRunningTime="2025-11-28 15:58:22.096447246 +0000 UTC m=+2031.944053697" watchObservedRunningTime="2025-11-28 15:58:22.100456513 +0000 UTC m=+2031.948062934" Nov 28 15:58:23 crc kubenswrapper[4647]: I1128 15:58:23.073935 4647 generic.go:334] "Generic (PLEG): container finished" podID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerID="47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1" exitCode=0 Nov 28 15:58:23 crc kubenswrapper[4647]: I1128 15:58:23.074049 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerDied","Data":"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1"} Nov 28 15:58:24 crc kubenswrapper[4647]: I1128 15:58:24.083826 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerStarted","Data":"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e"} Nov 28 15:58:24 crc kubenswrapper[4647]: I1128 15:58:24.115847 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4ts9q" podStartSLOduration=2.64344362 podStartE2EDuration="5.115830253s" podCreationTimestamp="2025-11-28 15:58:19 +0000 UTC" firstStartedPulling="2025-11-28 15:58:21.029175967 +0000 UTC m=+2030.876782388" lastFinishedPulling="2025-11-28 15:58:23.50156256 +0000 UTC m=+2033.349169021" observedRunningTime="2025-11-28 15:58:24.11119602 +0000 UTC m=+2033.958802461" watchObservedRunningTime="2025-11-28 15:58:24.115830253 +0000 UTC m=+2033.963436664" Nov 28 15:58:30 crc kubenswrapper[4647]: I1128 15:58:30.196480 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:30 crc kubenswrapper[4647]: I1128 15:58:30.196942 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:30 crc kubenswrapper[4647]: I1128 15:58:30.271940 4647 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:31 crc kubenswrapper[4647]: I1128 15:58:31.155697 4647 generic.go:334] "Generic (PLEG): container finished" podID="965d18f9-ce83-44f5-8ec7-4b13eefa7e30" containerID="3c1b62827f23121a3f0575fc34af4060a59ead3cb998b1896922dbd55c002166" exitCode=0 Nov 28 15:58:31 crc kubenswrapper[4647]: I1128 15:58:31.156116 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" event={"ID":"965d18f9-ce83-44f5-8ec7-4b13eefa7e30","Type":"ContainerDied","Data":"3c1b62827f23121a3f0575fc34af4060a59ead3cb998b1896922dbd55c002166"} Nov 28 15:58:31 crc kubenswrapper[4647]: I1128 15:58:31.246264 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:31 crc kubenswrapper[4647]: I1128 15:58:31.857368 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.664562 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.773488 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key\") pod \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.774040 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztv7h\" (UniqueName: \"kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h\") pod \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.774407 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory\") pod \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\" (UID: \"965d18f9-ce83-44f5-8ec7-4b13eefa7e30\") " Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.782935 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h" (OuterVolumeSpecName: "kube-api-access-ztv7h") pod "965d18f9-ce83-44f5-8ec7-4b13eefa7e30" (UID: "965d18f9-ce83-44f5-8ec7-4b13eefa7e30"). InnerVolumeSpecName "kube-api-access-ztv7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.818999 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "965d18f9-ce83-44f5-8ec7-4b13eefa7e30" (UID: "965d18f9-ce83-44f5-8ec7-4b13eefa7e30"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.829010 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory" (OuterVolumeSpecName: "inventory") pod "965d18f9-ce83-44f5-8ec7-4b13eefa7e30" (UID: "965d18f9-ce83-44f5-8ec7-4b13eefa7e30"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.877471 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.877796 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztv7h\" (UniqueName: \"kubernetes.io/projected/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-kube-api-access-ztv7h\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:32 crc kubenswrapper[4647]: I1128 15:58:32.877963 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/965d18f9-ce83-44f5-8ec7-4b13eefa7e30-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.188166 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" event={"ID":"965d18f9-ce83-44f5-8ec7-4b13eefa7e30","Type":"ContainerDied","Data":"0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e"} Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.188575 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a2d195ca8d7047d11c4a1d5e5d32974bc111cc15c1578b006e66ad38bafcb7e" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.188509 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4ts9q" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="registry-server" containerID="cri-o://0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e" gracePeriod=2 Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.188176 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-tkvrh" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.339300 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l"] Nov 28 15:58:33 crc kubenswrapper[4647]: E1128 15:58:33.341624 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965d18f9-ce83-44f5-8ec7-4b13eefa7e30" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.341687 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="965d18f9-ce83-44f5-8ec7-4b13eefa7e30" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.342118 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="965d18f9-ce83-44f5-8ec7-4b13eefa7e30" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.343106 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.345110 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.345430 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.348762 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.348798 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.366330 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l"] Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.389844 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.390194 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndxph\" (UniqueName: \"kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.390349 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.491694 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.491860 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.491996 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndxph\" (UniqueName: \"kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: 
\"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.502235 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.529882 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.542663 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndxph\" (UniqueName: \"kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.687701 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.695399 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.797581 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities\") pod \"205a9166-9a76-47bb-a739-f0b7a51350a0\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.798950 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wd4k\" (UniqueName: \"kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k\") pod \"205a9166-9a76-47bb-a739-f0b7a51350a0\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.799301 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content\") pod \"205a9166-9a76-47bb-a739-f0b7a51350a0\" (UID: \"205a9166-9a76-47bb-a739-f0b7a51350a0\") " Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.798844 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities" (OuterVolumeSpecName: "utilities") pod "205a9166-9a76-47bb-a739-f0b7a51350a0" (UID: "205a9166-9a76-47bb-a739-f0b7a51350a0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.800353 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.805913 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k" (OuterVolumeSpecName: "kube-api-access-7wd4k") pod "205a9166-9a76-47bb-a739-f0b7a51350a0" (UID: "205a9166-9a76-47bb-a739-f0b7a51350a0"). InnerVolumeSpecName "kube-api-access-7wd4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.863829 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "205a9166-9a76-47bb-a739-f0b7a51350a0" (UID: "205a9166-9a76-47bb-a739-f0b7a51350a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.902340 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/205a9166-9a76-47bb-a739-f0b7a51350a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:33 crc kubenswrapper[4647]: I1128 15:58:33.902365 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wd4k\" (UniqueName: \"kubernetes.io/projected/205a9166-9a76-47bb-a739-f0b7a51350a0-kube-api-access-7wd4k\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.217223 4647 generic.go:334] "Generic (PLEG): container finished" podID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerID="0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e" exitCode=0 Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.217276 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerDied","Data":"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e"} Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.217310 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ts9q" event={"ID":"205a9166-9a76-47bb-a739-f0b7a51350a0","Type":"ContainerDied","Data":"8adf7b31e3408803bb16604291508f65a2135e7f30a35abcba0692b9cfad9043"} Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.217333 4647 scope.go:117] "RemoveContainer" containerID="0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.217518 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ts9q" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.257273 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l"] Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.272463 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.281255 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ts9q"] Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.284259 4647 scope.go:117] "RemoveContainer" containerID="47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.307498 4647 scope.go:117] "RemoveContainer" containerID="61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.332525 4647 scope.go:117] "RemoveContainer" containerID="0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e" Nov 28 15:58:34 crc kubenswrapper[4647]: E1128 15:58:34.333134 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e\": container with ID starting with 0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e not found: ID does not exist" containerID="0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.333173 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e"} err="failed to get container status \"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e\": rpc error: code = NotFound desc = could not find container \"0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e\": container with ID starting with 0dd20e72dab843f1b504ffe4b8ac8d92803e7e1c670104e81cdafef681d8866e not found: ID does not exist" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.333224 4647 scope.go:117] "RemoveContainer" containerID="47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1" Nov 28 15:58:34 crc kubenswrapper[4647]: E1128 15:58:34.333716 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1\": container with ID starting with 47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1 not found: ID does not exist" containerID="47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.333762 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1"} err="failed to get container status \"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1\": rpc error: code = NotFound desc = could not find container \"47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1\": container with ID starting with 47f55698f1655dad55608bf41c35ad0cf0fc8fe4a93f859863648d199065d9e1 not found: ID does not exist" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.333791 4647 scope.go:117] "RemoveContainer" 
containerID="61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796" Nov 28 15:58:34 crc kubenswrapper[4647]: E1128 15:58:34.334243 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796\": container with ID starting with 61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796 not found: ID does not exist" containerID="61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.334268 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796"} err="failed to get container status \"61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796\": rpc error: code = NotFound desc = could not find container \"61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796\": container with ID starting with 61f08f6c2e883e84f490101658575ebe08c1e24fe719fc87da0d29fe0db9d796 not found: ID does not exist" Nov 28 15:58:34 crc kubenswrapper[4647]: I1128 15:58:34.410219 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" path="/var/lib/kubelet/pods/205a9166-9a76-47bb-a739-f0b7a51350a0/volumes" Nov 28 15:58:35 crc kubenswrapper[4647]: I1128 15:58:35.230145 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" event={"ID":"8cfdffa4-b728-4135-a613-7198ffda163d","Type":"ContainerStarted","Data":"905022f9ea219a42ca4d96510ad31ed3f42dbc800849110dedffa10aabf7f2ec"} Nov 28 15:58:35 crc kubenswrapper[4647]: I1128 15:58:35.230391 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" event={"ID":"8cfdffa4-b728-4135-a613-7198ffda163d","Type":"ContainerStarted","Data":"95e2380329e074de105977419ed3346afd5b2dc36e4e686b0accf9876745cde0"} Nov 28 15:58:35 crc kubenswrapper[4647]: I1128 15:58:35.255256 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" podStartSLOduration=1.828686799 podStartE2EDuration="2.25524119s" podCreationTimestamp="2025-11-28 15:58:33 +0000 UTC" firstStartedPulling="2025-11-28 15:58:34.284673943 +0000 UTC m=+2044.132280374" lastFinishedPulling="2025-11-28 15:58:34.711228304 +0000 UTC m=+2044.558834765" observedRunningTime="2025-11-28 15:58:35.248307586 +0000 UTC m=+2045.095914007" watchObservedRunningTime="2025-11-28 15:58:35.25524119 +0000 UTC m=+2045.102847611" Nov 28 15:58:46 crc kubenswrapper[4647]: I1128 15:58:46.345032 4647 generic.go:334] "Generic (PLEG): container finished" podID="8cfdffa4-b728-4135-a613-7198ffda163d" containerID="905022f9ea219a42ca4d96510ad31ed3f42dbc800849110dedffa10aabf7f2ec" exitCode=0 Nov 28 15:58:46 crc kubenswrapper[4647]: I1128 15:58:46.345118 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" event={"ID":"8cfdffa4-b728-4135-a613-7198ffda163d","Type":"ContainerDied","Data":"905022f9ea219a42ca4d96510ad31ed3f42dbc800849110dedffa10aabf7f2ec"} Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.822998 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.935123 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndxph\" (UniqueName: \"kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph\") pod \"8cfdffa4-b728-4135-a613-7198ffda163d\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.935330 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory\") pod \"8cfdffa4-b728-4135-a613-7198ffda163d\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.935386 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key\") pod \"8cfdffa4-b728-4135-a613-7198ffda163d\" (UID: \"8cfdffa4-b728-4135-a613-7198ffda163d\") " Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.942735 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph" (OuterVolumeSpecName: "kube-api-access-ndxph") pod "8cfdffa4-b728-4135-a613-7198ffda163d" (UID: "8cfdffa4-b728-4135-a613-7198ffda163d"). InnerVolumeSpecName "kube-api-access-ndxph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.971142 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory" (OuterVolumeSpecName: "inventory") pod "8cfdffa4-b728-4135-a613-7198ffda163d" (UID: "8cfdffa4-b728-4135-a613-7198ffda163d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:47 crc kubenswrapper[4647]: I1128 15:58:47.983969 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8cfdffa4-b728-4135-a613-7198ffda163d" (UID: "8cfdffa4-b728-4135-a613-7198ffda163d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.038603 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndxph\" (UniqueName: \"kubernetes.io/projected/8cfdffa4-b728-4135-a613-7198ffda163d-kube-api-access-ndxph\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.038658 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.038681 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8cfdffa4-b728-4135-a613-7198ffda163d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.367233 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" event={"ID":"8cfdffa4-b728-4135-a613-7198ffda163d","Type":"ContainerDied","Data":"95e2380329e074de105977419ed3346afd5b2dc36e4e686b0accf9876745cde0"} Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.367559 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95e2380329e074de105977419ed3346afd5b2dc36e4e686b0accf9876745cde0" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.367628 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.512139 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq"] Nov 28 15:58:48 crc kubenswrapper[4647]: E1128 15:58:48.515476 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="extract-content" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.516024 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="extract-content" Nov 28 15:58:48 crc kubenswrapper[4647]: E1128 15:58:48.516130 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cfdffa4-b728-4135-a613-7198ffda163d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.516223 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cfdffa4-b728-4135-a613-7198ffda163d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:48 crc kubenswrapper[4647]: E1128 15:58:48.516347 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="extract-utilities" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.516456 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="extract-utilities" Nov 28 15:58:48 crc kubenswrapper[4647]: E1128 15:58:48.516545 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="registry-server" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.516611 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="registry-server" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.516957 4647 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="205a9166-9a76-47bb-a739-f0b7a51350a0" containerName="registry-server" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.517101 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cfdffa4-b728-4135-a613-7198ffda163d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.518052 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.526604 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.526751 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.526891 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.528780 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.528962 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.529030 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.529088 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.530352 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq"] Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.530641 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548483 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548535 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548561 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: 
\"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548595 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548627 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548670 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548722 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rtv9\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548762 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548785 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548808 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc 
kubenswrapper[4647]: I1128 15:58:48.548857 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548883 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548914 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.548946 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651342 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651638 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651657 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651708 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651733 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651756 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651794 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rtv9\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651829 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651850 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651872 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651896 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651918 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651942 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.651970 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.659814 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.660175 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.660729 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.661836 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.666086 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.667392 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.667725 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.669291 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.670192 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.670399 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.674816 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rtv9\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.675712 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.676288 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.676758 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-d78nq\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:48 crc kubenswrapper[4647]: I1128 15:58:48.846796 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:58:49 crc kubenswrapper[4647]: I1128 15:58:49.392897 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq"] Nov 28 15:58:49 crc kubenswrapper[4647]: W1128 15:58:49.414993 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cb1ba80_4e50_4753_9887_3e420c825d2a.slice/crio-5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c WatchSource:0}: Error finding container 5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c: Status 404 returned error can't find the container with id 5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c Nov 28 15:58:50 crc kubenswrapper[4647]: I1128 15:58:50.388088 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" event={"ID":"4cb1ba80-4e50-4753-9887-3e420c825d2a","Type":"ContainerStarted","Data":"5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c"} Nov 28 15:58:51 crc kubenswrapper[4647]: I1128 15:58:51.397632 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" event={"ID":"4cb1ba80-4e50-4753-9887-3e420c825d2a","Type":"ContainerStarted","Data":"af3e8c146b61619bcf00af34d47a7110eed2988094125a360fba2d95c5cfaf18"} Nov 28 15:58:51 crc kubenswrapper[4647]: I1128 15:58:51.430643 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" podStartSLOduration=2.712181054 podStartE2EDuration="3.430618998s" podCreationTimestamp="2025-11-28 15:58:48 +0000 UTC" firstStartedPulling="2025-11-28 15:58:49.417752853 +0000 UTC m=+2059.265359274" lastFinishedPulling="2025-11-28 15:58:50.136190767 +0000 UTC m=+2059.983797218" observedRunningTime="2025-11-28 15:58:51.420727024 +0000 UTC m=+2061.268333455" watchObservedRunningTime="2025-11-28 15:58:51.430618998 +0000 UTC m=+2061.278225419" Nov 28 15:59:35 crc kubenswrapper[4647]: I1128 15:59:35.860096 4647 generic.go:334] "Generic (PLEG): container finished" podID="4cb1ba80-4e50-4753-9887-3e420c825d2a" containerID="af3e8c146b61619bcf00af34d47a7110eed2988094125a360fba2d95c5cfaf18" 
exitCode=0 Nov 28 15:59:35 crc kubenswrapper[4647]: I1128 15:59:35.860622 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" event={"ID":"4cb1ba80-4e50-4753-9887-3e420c825d2a","Type":"ContainerDied","Data":"af3e8c146b61619bcf00af34d47a7110eed2988094125a360fba2d95c5cfaf18"} Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.374238 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519047 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519106 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519132 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rtv9\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519174 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519232 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519261 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519289 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519319 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519396 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519443 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519532 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519574 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519656 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.519683 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"4cb1ba80-4e50-4753-9887-3e420c825d2a\" (UID: \"4cb1ba80-4e50-4753-9887-3e420c825d2a\") " Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.528743 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.531355 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.531447 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.531456 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.531610 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9" (OuterVolumeSpecName: "kube-api-access-5rtv9") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "kube-api-access-5rtv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.533322 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.533800 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.534522 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.534584 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.536184 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.548007 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.551790 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.565574 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.566582 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory" (OuterVolumeSpecName: "inventory") pod "4cb1ba80-4e50-4753-9887-3e420c825d2a" (UID: "4cb1ba80-4e50-4753-9887-3e420c825d2a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622016 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622590 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622614 4647 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622629 4647 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622641 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rtv9\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-kube-api-access-5rtv9\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622654 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622669 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622684 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4cb1ba80-4e50-4753-9887-3e420c825d2a-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622697 4647 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622709 4647 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622719 4647 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622730 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622744 4647 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.622755 4647 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb1ba80-4e50-4753-9887-3e420c825d2a-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.882944 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" event={"ID":"4cb1ba80-4e50-4753-9887-3e420c825d2a","Type":"ContainerDied","Data":"5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c"} Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.882987 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fd9b937cce3f597774f0a2a62b666b16084071e2ebbaa264581b88d84cf016c" Nov 28 15:59:37 crc kubenswrapper[4647]: I1128 15:59:37.883027 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-d78nq" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.016959 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm"] Nov 28 15:59:38 crc kubenswrapper[4647]: E1128 15:59:38.017403 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cb1ba80-4e50-4753-9887-3e420c825d2a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.017436 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cb1ba80-4e50-4753-9887-3e420c825d2a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.017633 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cb1ba80-4e50-4753-9887-3e420c825d2a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.018526 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.024841 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.024945 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.025031 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.031969 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.032175 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.034056 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm"] Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.133381 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgknb\" (UniqueName: \"kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.133468 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.133708 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.134274 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.134577 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.236805 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" 
(UniqueName: \"kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.237048 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.237213 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgknb\" (UniqueName: \"kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.237287 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.237381 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.242758 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.242832 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.244542 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.246378 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.264323 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgknb\" (UniqueName: \"kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rrkrm\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.338348 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.718119 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm"] Nov 28 15:59:38 crc kubenswrapper[4647]: I1128 15:59:38.892618 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" event={"ID":"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a","Type":"ContainerStarted","Data":"7d412630536a4a64e15d3d82f324e07707ff3ae972f4be1722e53fc32f8e83af"} Nov 28 15:59:39 crc kubenswrapper[4647]: I1128 15:59:39.905050 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" event={"ID":"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a","Type":"ContainerStarted","Data":"a6d83285a3fb64433296f5235b6d11e1c51d2aedf2c802c8ef33b19d662488ce"} Nov 28 15:59:39 crc kubenswrapper[4647]: I1128 15:59:39.937728 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" podStartSLOduration=2.326624613 podStartE2EDuration="2.937686851s" podCreationTimestamp="2025-11-28 15:59:37 +0000 UTC" firstStartedPulling="2025-11-28 15:59:38.729029582 +0000 UTC m=+2108.576636003" lastFinishedPulling="2025-11-28 15:59:39.34009182 +0000 UTC m=+2109.187698241" observedRunningTime="2025-11-28 15:59:39.92711825 +0000 UTC m=+2109.774724711" watchObservedRunningTime="2025-11-28 15:59:39.937686851 +0000 UTC m=+2109.785293292" Nov 28 15:59:47 crc kubenswrapper[4647]: I1128 15:59:47.023138 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:59:47 crc kubenswrapper[4647]: I1128 15:59:47.023645 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.742341 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.747082 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.759812 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.769377 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.769660 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.769961 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78fv8\" (UniqueName: \"kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.872350 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78fv8\" (UniqueName: \"kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.872730 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.872864 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.873147 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.873250 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:55 crc kubenswrapper[4647]: I1128 15:59:55.906173 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-78fv8\" (UniqueName: \"kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8\") pod \"certified-operators-mfnqs\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:56 crc kubenswrapper[4647]: I1128 15:59:56.085728 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 15:59:56 crc kubenswrapper[4647]: I1128 15:59:56.711730 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 15:59:57 crc kubenswrapper[4647]: E1128 15:59:57.061490 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod021d6756_e0dd_437c_b08f_26b3fdd67beb.slice/crio-conmon-ce8fb754a78eecb9d18da9a48fa566cc975ca478c4170dc36927d83a9733b89d.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:59:57 crc kubenswrapper[4647]: I1128 15:59:57.094991 4647 generic.go:334] "Generic (PLEG): container finished" podID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerID="ce8fb754a78eecb9d18da9a48fa566cc975ca478c4170dc36927d83a9733b89d" exitCode=0 Nov 28 15:59:57 crc kubenswrapper[4647]: I1128 15:59:57.095197 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerDied","Data":"ce8fb754a78eecb9d18da9a48fa566cc975ca478c4170dc36927d83a9733b89d"} Nov 28 15:59:57 crc kubenswrapper[4647]: I1128 15:59:57.095237 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerStarted","Data":"f44c7a7d88fa51fe80827e02eb3c6de7b983371554d0da264bf0ec9c36abd682"} Nov 28 15:59:59 crc kubenswrapper[4647]: I1128 15:59:59.129072 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerStarted","Data":"476c532e36875904d540600aaeef7d114ad2fb357d2cb8e9feba5fd8045de51d"} Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.144287 4647 generic.go:334] "Generic (PLEG): container finished" podID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerID="476c532e36875904d540600aaeef7d114ad2fb357d2cb8e9feba5fd8045de51d" exitCode=0 Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.144520 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerDied","Data":"476c532e36875904d540600aaeef7d114ad2fb357d2cb8e9feba5fd8045de51d"} Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.166485 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf"] Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.167885 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.176121 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.176662 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.185836 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf"] Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.277556 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.277607 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ct4l\" (UniqueName: \"kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.277645 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.379697 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.379741 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ct4l\" (UniqueName: \"kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.379768 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.380891 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume\") pod 
\"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.386604 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.401095 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ct4l\" (UniqueName: \"kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l\") pod \"collect-profiles-29405760-jtxxf\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:00 crc kubenswrapper[4647]: I1128 16:00:00.516721 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:01 crc kubenswrapper[4647]: I1128 16:00:01.727542 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf"] Nov 28 16:00:01 crc kubenswrapper[4647]: W1128 16:00:01.757559 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75c74919_4a74_44fb_983f_1d4780061e77.slice/crio-3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932 WatchSource:0}: Error finding container 3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932: Status 404 returned error can't find the container with id 3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932 Nov 28 16:00:02 crc kubenswrapper[4647]: I1128 16:00:02.172971 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerStarted","Data":"d0c940108e02092f2cee95acc80e66110c92462f1cb089cd31e97d5c39618931"} Nov 28 16:00:02 crc kubenswrapper[4647]: I1128 16:00:02.175626 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" event={"ID":"75c74919-4a74-44fb-983f-1d4780061e77","Type":"ContainerStarted","Data":"3fcd2f6dec339d4e20cecee8182a3d050db325cc6fcec8a9ac81326d11b96fec"} Nov 28 16:00:02 crc kubenswrapper[4647]: I1128 16:00:02.175688 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" event={"ID":"75c74919-4a74-44fb-983f-1d4780061e77","Type":"ContainerStarted","Data":"3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932"} Nov 28 16:00:02 crc kubenswrapper[4647]: I1128 16:00:02.232030 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" podStartSLOduration=2.2320117760000002 podStartE2EDuration="2.232011776s" podCreationTimestamp="2025-11-28 16:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:00:02.22837125 +0000 UTC m=+2132.075977671" watchObservedRunningTime="2025-11-28 16:00:02.232011776 +0000 UTC 
m=+2132.079618197" Nov 28 16:00:02 crc kubenswrapper[4647]: I1128 16:00:02.232353 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mfnqs" podStartSLOduration=2.490471634 podStartE2EDuration="7.232348865s" podCreationTimestamp="2025-11-28 15:59:55 +0000 UTC" firstStartedPulling="2025-11-28 15:59:57.097852694 +0000 UTC m=+2126.945459135" lastFinishedPulling="2025-11-28 16:00:01.839729945 +0000 UTC m=+2131.687336366" observedRunningTime="2025-11-28 16:00:02.206377085 +0000 UTC m=+2132.053983516" watchObservedRunningTime="2025-11-28 16:00:02.232348865 +0000 UTC m=+2132.079955286" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.108308 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.112917 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.121012 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.198977 4647 generic.go:334] "Generic (PLEG): container finished" podID="75c74919-4a74-44fb-983f-1d4780061e77" containerID="3fcd2f6dec339d4e20cecee8182a3d050db325cc6fcec8a9ac81326d11b96fec" exitCode=0 Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.200242 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" event={"ID":"75c74919-4a74-44fb-983f-1d4780061e77","Type":"ContainerDied","Data":"3fcd2f6dec339d4e20cecee8182a3d050db325cc6fcec8a9ac81326d11b96fec"} Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.255008 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.255072 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.255331 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqfc8\" (UniqueName: \"kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.358487 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqfc8\" (UniqueName: \"kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.358618 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.358646 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.359157 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.359263 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.398344 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqfc8\" (UniqueName: \"kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8\") pod \"community-operators-sg2pd\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:03 crc kubenswrapper[4647]: I1128 16:00:03.470686 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.111398 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.217335 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerStarted","Data":"b70126e3bcb7df8b66df50344d7fce15aac1fe50fd12bd160b69a48595a50e08"} Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.640636 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.794254 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q"] Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.796941 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume\") pod \"75c74919-4a74-44fb-983f-1d4780061e77\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.797163 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ct4l\" (UniqueName: \"kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l\") pod \"75c74919-4a74-44fb-983f-1d4780061e77\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.797201 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume\") pod \"75c74919-4a74-44fb-983f-1d4780061e77\" (UID: \"75c74919-4a74-44fb-983f-1d4780061e77\") " Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.798199 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume" (OuterVolumeSpecName: "config-volume") pod "75c74919-4a74-44fb-983f-1d4780061e77" (UID: "75c74919-4a74-44fb-983f-1d4780061e77"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.804330 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "75c74919-4a74-44fb-983f-1d4780061e77" (UID: "75c74919-4a74-44fb-983f-1d4780061e77"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.805545 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-xc84q"] Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.805620 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l" (OuterVolumeSpecName: "kube-api-access-7ct4l") pod "75c74919-4a74-44fb-983f-1d4780061e77" (UID: "75c74919-4a74-44fb-983f-1d4780061e77"). InnerVolumeSpecName "kube-api-access-7ct4l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.899301 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75c74919-4a74-44fb-983f-1d4780061e77-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.899629 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ct4l\" (UniqueName: \"kubernetes.io/projected/75c74919-4a74-44fb-983f-1d4780061e77-kube-api-access-7ct4l\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:04 crc kubenswrapper[4647]: I1128 16:00:04.899690 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75c74919-4a74-44fb-983f-1d4780061e77-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:05 crc kubenswrapper[4647]: I1128 16:00:05.228316 4647 generic.go:334] "Generic (PLEG): container finished" podID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerID="1d216006dbf397ea4988fd3cc0844b342c57349bb57d3744d2b49d3a850aac98" exitCode=0 Nov 28 16:00:05 crc kubenswrapper[4647]: I1128 16:00:05.228438 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerDied","Data":"1d216006dbf397ea4988fd3cc0844b342c57349bb57d3744d2b49d3a850aac98"} Nov 28 16:00:05 crc kubenswrapper[4647]: I1128 16:00:05.230711 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" event={"ID":"75c74919-4a74-44fb-983f-1d4780061e77","Type":"ContainerDied","Data":"3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932"} Nov 28 16:00:05 crc kubenswrapper[4647]: I1128 16:00:05.230805 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf" Nov 28 16:00:05 crc kubenswrapper[4647]: I1128 16:00:05.237819 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d6d7f7f6bdfee2e111337023e7a2283ac0a2c9100ba45d0ecb59f92ab9d8932" Nov 28 16:00:06 crc kubenswrapper[4647]: I1128 16:00:06.086665 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:06 crc kubenswrapper[4647]: I1128 16:00:06.087166 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:06 crc kubenswrapper[4647]: I1128 16:00:06.182978 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:06 crc kubenswrapper[4647]: I1128 16:00:06.409730 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6" path="/var/lib/kubelet/pods/f5e71bcd-07ad-4ec9-a9e1-aafd119c8fd6/volumes" Nov 28 16:00:07 crc kubenswrapper[4647]: I1128 16:00:07.264880 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerStarted","Data":"79342d240315b133c761a2bcb094b3e08080b3dfe59191fa5e061d73bc5b03a4"} Nov 28 16:00:08 crc kubenswrapper[4647]: I1128 16:00:08.275595 4647 generic.go:334] "Generic (PLEG): container finished" podID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerID="79342d240315b133c761a2bcb094b3e08080b3dfe59191fa5e061d73bc5b03a4" exitCode=0 Nov 28 16:00:08 crc kubenswrapper[4647]: I1128 16:00:08.275639 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerDied","Data":"79342d240315b133c761a2bcb094b3e08080b3dfe59191fa5e061d73bc5b03a4"} Nov 28 16:00:10 crc kubenswrapper[4647]: I1128 16:00:10.295306 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerStarted","Data":"bb1e9d2a58f8d6564ba414449a881672e033a4d7000de5da3d97249b9e3c7e63"} Nov 28 16:00:10 crc kubenswrapper[4647]: I1128 16:00:10.326659 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sg2pd" podStartSLOduration=2.93985663 podStartE2EDuration="7.326637139s" podCreationTimestamp="2025-11-28 16:00:03 +0000 UTC" firstStartedPulling="2025-11-28 16:00:05.230350164 +0000 UTC m=+2135.077956615" lastFinishedPulling="2025-11-28 16:00:09.617130693 +0000 UTC m=+2139.464737124" observedRunningTime="2025-11-28 16:00:10.324992666 +0000 UTC m=+2140.172599087" watchObservedRunningTime="2025-11-28 16:00:10.326637139 +0000 UTC m=+2140.174243560" Nov 28 16:00:13 crc kubenswrapper[4647]: I1128 16:00:13.471273 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:13 crc kubenswrapper[4647]: I1128 16:00:13.473135 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:13 crc kubenswrapper[4647]: I1128 16:00:13.526263 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:14 crc kubenswrapper[4647]: I1128 16:00:14.409696 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:14 crc kubenswrapper[4647]: I1128 16:00:14.486487 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:16 crc kubenswrapper[4647]: I1128 16:00:16.148985 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:16 crc kubenswrapper[4647]: I1128 16:00:16.236770 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 16:00:16 crc kubenswrapper[4647]: I1128 16:00:16.364861 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sg2pd" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="registry-server" containerID="cri-o://bb1e9d2a58f8d6564ba414449a881672e033a4d7000de5da3d97249b9e3c7e63" gracePeriod=2 Nov 28 16:00:16 crc kubenswrapper[4647]: I1128 16:00:16.364980 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mfnqs" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="registry-server" containerID="cri-o://d0c940108e02092f2cee95acc80e66110c92462f1cb089cd31e97d5c39618931" gracePeriod=2 Nov 28 16:00:17 crc kubenswrapper[4647]: I1128 16:00:17.022649 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:00:17 crc kubenswrapper[4647]: I1128 16:00:17.022759 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:00:17 crc kubenswrapper[4647]: I1128 16:00:17.979067 4647 scope.go:117] "RemoveContainer" containerID="ad85127e24b570fc14376b2b3dcccc013586fbd7d0a56b910c1a95adb7ea43d2" Nov 28 16:00:18 crc kubenswrapper[4647]: I1128 16:00:18.388272 4647 generic.go:334] "Generic (PLEG): container finished" podID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerID="d0c940108e02092f2cee95acc80e66110c92462f1cb089cd31e97d5c39618931" exitCode=0 Nov 28 16:00:18 crc kubenswrapper[4647]: I1128 16:00:18.388363 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerDied","Data":"d0c940108e02092f2cee95acc80e66110c92462f1cb089cd31e97d5c39618931"} Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.041149 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.138904 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities\") pod \"021d6756-e0dd-437c-b08f-26b3fdd67beb\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.139183 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content\") pod \"021d6756-e0dd-437c-b08f-26b3fdd67beb\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.139346 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78fv8\" (UniqueName: \"kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8\") pod \"021d6756-e0dd-437c-b08f-26b3fdd67beb\" (UID: \"021d6756-e0dd-437c-b08f-26b3fdd67beb\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.139821 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities" (OuterVolumeSpecName: "utilities") pod "021d6756-e0dd-437c-b08f-26b3fdd67beb" (UID: "021d6756-e0dd-437c-b08f-26b3fdd67beb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.140188 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.169816 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8" (OuterVolumeSpecName: "kube-api-access-78fv8") pod "021d6756-e0dd-437c-b08f-26b3fdd67beb" (UID: "021d6756-e0dd-437c-b08f-26b3fdd67beb"). InnerVolumeSpecName "kube-api-access-78fv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.187742 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "021d6756-e0dd-437c-b08f-26b3fdd67beb" (UID: "021d6756-e0dd-437c-b08f-26b3fdd67beb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.242576 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/021d6756-e0dd-437c-b08f-26b3fdd67beb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.242605 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78fv8\" (UniqueName: \"kubernetes.io/projected/021d6756-e0dd-437c-b08f-26b3fdd67beb-kube-api-access-78fv8\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.408546 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mfnqs" event={"ID":"021d6756-e0dd-437c-b08f-26b3fdd67beb","Type":"ContainerDied","Data":"f44c7a7d88fa51fe80827e02eb3c6de7b983371554d0da264bf0ec9c36abd682"} Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.409210 4647 scope.go:117] "RemoveContainer" containerID="d0c940108e02092f2cee95acc80e66110c92462f1cb089cd31e97d5c39618931" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.409339 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mfnqs" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.419369 4647 generic.go:334] "Generic (PLEG): container finished" podID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerID="bb1e9d2a58f8d6564ba414449a881672e033a4d7000de5da3d97249b9e3c7e63" exitCode=0 Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.419430 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerDied","Data":"bb1e9d2a58f8d6564ba414449a881672e033a4d7000de5da3d97249b9e3c7e63"} Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.463049 4647 scope.go:117] "RemoveContainer" containerID="476c532e36875904d540600aaeef7d114ad2fb357d2cb8e9feba5fd8045de51d" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.473126 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.481574 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mfnqs"] Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.571344 4647 scope.go:117] "RemoveContainer" containerID="ce8fb754a78eecb9d18da9a48fa566cc975ca478c4170dc36927d83a9733b89d" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.606374 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.659867 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities\") pod \"b82c047f-7d51-4057-bf9c-c96f63de8a40\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.660090 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content\") pod \"b82c047f-7d51-4057-bf9c-c96f63de8a40\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.660197 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqfc8\" (UniqueName: \"kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8\") pod \"b82c047f-7d51-4057-bf9c-c96f63de8a40\" (UID: \"b82c047f-7d51-4057-bf9c-c96f63de8a40\") " Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.662142 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities" (OuterVolumeSpecName: "utilities") pod "b82c047f-7d51-4057-bf9c-c96f63de8a40" (UID: "b82c047f-7d51-4057-bf9c-c96f63de8a40"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.665729 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8" (OuterVolumeSpecName: "kube-api-access-sqfc8") pod "b82c047f-7d51-4057-bf9c-c96f63de8a40" (UID: "b82c047f-7d51-4057-bf9c-c96f63de8a40"). InnerVolumeSpecName "kube-api-access-sqfc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.723294 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b82c047f-7d51-4057-bf9c-c96f63de8a40" (UID: "b82c047f-7d51-4057-bf9c-c96f63de8a40"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.762968 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.763009 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqfc8\" (UniqueName: \"kubernetes.io/projected/b82c047f-7d51-4057-bf9c-c96f63de8a40-kube-api-access-sqfc8\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:19 crc kubenswrapper[4647]: I1128 16:00:19.763021 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82c047f-7d51-4057-bf9c-c96f63de8a40-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.406591 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" path="/var/lib/kubelet/pods/021d6756-e0dd-437c-b08f-26b3fdd67beb/volumes" Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.432064 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sg2pd" event={"ID":"b82c047f-7d51-4057-bf9c-c96f63de8a40","Type":"ContainerDied","Data":"b70126e3bcb7df8b66df50344d7fce15aac1fe50fd12bd160b69a48595a50e08"} Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.432081 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sg2pd" Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.432814 4647 scope.go:117] "RemoveContainer" containerID="bb1e9d2a58f8d6564ba414449a881672e033a4d7000de5da3d97249b9e3c7e63" Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.465122 4647 scope.go:117] "RemoveContainer" containerID="79342d240315b133c761a2bcb094b3e08080b3dfe59191fa5e061d73bc5b03a4" Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.467983 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.488926 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sg2pd"] Nov 28 16:00:20 crc kubenswrapper[4647]: I1128 16:00:20.493551 4647 scope.go:117] "RemoveContainer" containerID="1d216006dbf397ea4988fd3cc0844b342c57349bb57d3744d2b49d3a850aac98" Nov 28 16:00:22 crc kubenswrapper[4647]: I1128 16:00:22.413683 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" path="/var/lib/kubelet/pods/b82c047f-7d51-4057-bf9c-c96f63de8a40/volumes" Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.022706 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.023585 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:00:47 crc 
kubenswrapper[4647]: I1128 16:00:47.023669 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.025028 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.025152 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" gracePeriod=600 Nov 28 16:00:47 crc kubenswrapper[4647]: E1128 16:00:47.146780 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.458484 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" exitCode=0 Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.458548 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"} Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.458586 4647 scope.go:117] "RemoveContainer" containerID="fbd9955acb6f04109aaef547a7a78f405714e4a2cfc5b9f97b929edb35fff07d" Nov 28 16:00:47 crc kubenswrapper[4647]: I1128 16:00:47.459551 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:00:47 crc kubenswrapper[4647]: E1128 16:00:47.459962 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:00:58 crc kubenswrapper[4647]: I1128 16:00:58.395910 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:00:58 crc kubenswrapper[4647]: E1128 16:00:58.397050 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.172260 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405761-4rc8f"] Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173078 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173095 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173127 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173136 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173156 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75c74919-4a74-44fb-983f-1d4780061e77" containerName="collect-profiles" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173164 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="75c74919-4a74-44fb-983f-1d4780061e77" containerName="collect-profiles" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173189 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173196 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173212 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173219 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173236 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173243 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: E1128 16:01:00.173260 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173268 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173516 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="021d6756-e0dd-437c-b08f-26b3fdd67beb" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173545 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b82c047f-7d51-4057-bf9c-c96f63de8a40" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.173568 4647 
memory_manager.go:354] "RemoveStaleState removing state" podUID="75c74919-4a74-44fb-983f-1d4780061e77" containerName="collect-profiles" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.174713 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.215985 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405761-4rc8f"] Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.310216 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.310275 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.310381 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.310436 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpn5n\" (UniqueName: \"kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.411937 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.412000 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.412303 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.412336 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpn5n\" (UniqueName: \"kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n\") pod 
\"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.418736 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.418745 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.420434 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.437003 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpn5n\" (UniqueName: \"kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n\") pod \"keystone-cron-29405761-4rc8f\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:00 crc kubenswrapper[4647]: I1128 16:01:00.508215 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:01 crc kubenswrapper[4647]: I1128 16:01:01.030255 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405761-4rc8f"] Nov 28 16:01:01 crc kubenswrapper[4647]: I1128 16:01:01.624802 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-4rc8f" event={"ID":"03bc8de1-2028-4b26-bf81-c51d09cf6a71","Type":"ContainerStarted","Data":"6d20f7538484a48715bc9c94c490dc0e09477eeabcce16d89d882ad7105042e7"} Nov 28 16:01:02 crc kubenswrapper[4647]: I1128 16:01:02.639842 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-4rc8f" event={"ID":"03bc8de1-2028-4b26-bf81-c51d09cf6a71","Type":"ContainerStarted","Data":"4e7375fdfa00d8b45cd0abf7f023a9bfa7b7782b9169872dc370ff5f265ad968"} Nov 28 16:01:02 crc kubenswrapper[4647]: I1128 16:01:02.642764 4647 generic.go:334] "Generic (PLEG): container finished" podID="7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" containerID="a6d83285a3fb64433296f5235b6d11e1c51d2aedf2c802c8ef33b19d662488ce" exitCode=0 Nov 28 16:01:02 crc kubenswrapper[4647]: I1128 16:01:02.642809 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" event={"ID":"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a","Type":"ContainerDied","Data":"a6d83285a3fb64433296f5235b6d11e1c51d2aedf2c802c8ef33b19d662488ce"} Nov 28 16:01:02 crc kubenswrapper[4647]: I1128 16:01:02.682945 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405761-4rc8f" podStartSLOduration=2.682913766 podStartE2EDuration="2.682913766s" podCreationTimestamp="2025-11-28 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:01:02.676128685 +0000 UTC m=+2192.523735146" watchObservedRunningTime="2025-11-28 16:01:02.682913766 +0000 UTC m=+2192.530520207" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.094403 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.195036 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory\") pod \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.195383 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle\") pod \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.195580 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgknb\" (UniqueName: \"kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb\") pod \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.195616 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key\") pod \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.196004 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0\") pod \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\" (UID: \"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a\") " Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.209387 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb" (OuterVolumeSpecName: "kube-api-access-dgknb") pod "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" (UID: "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a"). InnerVolumeSpecName "kube-api-access-dgknb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.230624 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" (UID: "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.243681 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" (UID: "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a"). 
InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.244898 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory" (OuterVolumeSpecName: "inventory") pod "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" (UID: "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.247601 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" (UID: "7123f6db-1e1e-4bfb-97ca-f142f6cdb13a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.298651 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.298687 4647 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.298699 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgknb\" (UniqueName: \"kubernetes.io/projected/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-kube-api-access-dgknb\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.298708 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.298717 4647 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7123f6db-1e1e-4bfb-97ca-f142f6cdb13a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.667046 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" event={"ID":"7123f6db-1e1e-4bfb-97ca-f142f6cdb13a","Type":"ContainerDied","Data":"7d412630536a4a64e15d3d82f324e07707ff3ae972f4be1722e53fc32f8e83af"} Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.667106 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rrkrm" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.667122 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d412630536a4a64e15d3d82f324e07707ff3ae972f4be1722e53fc32f8e83af" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.857265 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc"] Nov 28 16:01:04 crc kubenswrapper[4647]: E1128 16:01:04.857963 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.858030 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.858310 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="7123f6db-1e1e-4bfb-97ca-f142f6cdb13a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.859063 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.863956 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.864348 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.864604 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.864695 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.864709 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.864842 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 28 16:01:04 crc kubenswrapper[4647]: I1128 16:01:04.875628 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc"] Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011731 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011812 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011862 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7bk4\" (UniqueName: \"kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011902 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011945 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.011979 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113514 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113577 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7bk4\" (UniqueName: \"kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113612 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113665 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113705 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.113760 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.117702 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.118915 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.120963 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.121981 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.122218 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.134989 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7bk4\" (UniqueName: \"kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.176228 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.679505 4647 generic.go:334] "Generic (PLEG): container finished" podID="03bc8de1-2028-4b26-bf81-c51d09cf6a71" containerID="4e7375fdfa00d8b45cd0abf7f023a9bfa7b7782b9169872dc370ff5f265ad968" exitCode=0 Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.679945 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-4rc8f" event={"ID":"03bc8de1-2028-4b26-bf81-c51d09cf6a71","Type":"ContainerDied","Data":"4e7375fdfa00d8b45cd0abf7f023a9bfa7b7782b9169872dc370ff5f265ad968"} Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.736892 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.739635 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.758541 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.831432 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc"] Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.841490 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.850060 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.850148 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lz5z\" (UniqueName: \"kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.850215 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.951964 4647 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.952027 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lz5z\" (UniqueName: \"kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.952063 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.952525 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.952557 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:05 crc kubenswrapper[4647]: I1128 16:01:05.977811 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lz5z\" (UniqueName: \"kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z\") pod \"redhat-operators-4n5hk\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:06 crc kubenswrapper[4647]: I1128 16:01:06.071618 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:06 crc kubenswrapper[4647]: I1128 16:01:06.552607 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:06 crc kubenswrapper[4647]: I1128 16:01:06.688554 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerStarted","Data":"c7092a04e05bd09e298079f4b332e7fe642dcaaf02efdfb297fa68169a079040"} Nov 28 16:01:06 crc kubenswrapper[4647]: I1128 16:01:06.690556 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" event={"ID":"1180d1cb-f9bc-4646-864d-0bdea17fd99f","Type":"ContainerStarted","Data":"2f7680c47ec28be740a0aba165da0481ded3e3edda9daf8294aea1f7d355fb25"} Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.114333 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.277449 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpn5n\" (UniqueName: \"kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n\") pod \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.278058 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys\") pod \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.278247 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle\") pod \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.278436 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data\") pod \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\" (UID: \"03bc8de1-2028-4b26-bf81-c51d09cf6a71\") " Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.286610 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n" (OuterVolumeSpecName: "kube-api-access-fpn5n") pod "03bc8de1-2028-4b26-bf81-c51d09cf6a71" (UID: "03bc8de1-2028-4b26-bf81-c51d09cf6a71"). InnerVolumeSpecName "kube-api-access-fpn5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.289238 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "03bc8de1-2028-4b26-bf81-c51d09cf6a71" (UID: "03bc8de1-2028-4b26-bf81-c51d09cf6a71"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.306046 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03bc8de1-2028-4b26-bf81-c51d09cf6a71" (UID: "03bc8de1-2028-4b26-bf81-c51d09cf6a71"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.329593 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data" (OuterVolumeSpecName: "config-data") pod "03bc8de1-2028-4b26-bf81-c51d09cf6a71" (UID: "03bc8de1-2028-4b26-bf81-c51d09cf6a71"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.381225 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpn5n\" (UniqueName: \"kubernetes.io/projected/03bc8de1-2028-4b26-bf81-c51d09cf6a71-kube-api-access-fpn5n\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.381264 4647 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.381275 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.381283 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03bc8de1-2028-4b26-bf81-c51d09cf6a71-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.702961 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" event={"ID":"1180d1cb-f9bc-4646-864d-0bdea17fd99f","Type":"ContainerStarted","Data":"d1ccc1e92b84a01ca8d61ef273ea257e7e5bd5e93076add74b673cba3a045144"} Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.705333 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-4rc8f" event={"ID":"03bc8de1-2028-4b26-bf81-c51d09cf6a71","Type":"ContainerDied","Data":"6d20f7538484a48715bc9c94c490dc0e09477eeabcce16d89d882ad7105042e7"} Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.705364 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d20f7538484a48715bc9c94c490dc0e09477eeabcce16d89d882ad7105042e7" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.705402 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405761-4rc8f" Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.709362 4647 generic.go:334] "Generic (PLEG): container finished" podID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerID="ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967" exitCode=0 Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.709505 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerDied","Data":"ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967"} Nov 28 16:01:07 crc kubenswrapper[4647]: I1128 16:01:07.739036 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" podStartSLOduration=2.47342283 podStartE2EDuration="3.739018103s" podCreationTimestamp="2025-11-28 16:01:04 +0000 UTC" firstStartedPulling="2025-11-28 16:01:05.841242789 +0000 UTC m=+2195.688849210" lastFinishedPulling="2025-11-28 16:01:07.106838062 +0000 UTC m=+2196.954444483" observedRunningTime="2025-11-28 16:01:07.733497286 +0000 UTC m=+2197.581103707" watchObservedRunningTime="2025-11-28 16:01:07.739018103 +0000 UTC m=+2197.586624534" Nov 28 16:01:09 crc kubenswrapper[4647]: I1128 16:01:09.394152 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:01:09 crc kubenswrapper[4647]: E1128 16:01:09.395128 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:01:09 crc kubenswrapper[4647]: I1128 16:01:09.735475 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerStarted","Data":"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74"} Nov 28 16:01:13 crc kubenswrapper[4647]: I1128 16:01:13.793840 4647 generic.go:334] "Generic (PLEG): container finished" podID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerID="a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74" exitCode=0 Nov 28 16:01:13 crc kubenswrapper[4647]: I1128 16:01:13.793904 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerDied","Data":"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74"} Nov 28 16:01:14 crc kubenswrapper[4647]: I1128 16:01:14.690615 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-2csd4" podUID="8ee0a7ea-967a-457c-9d3b-1eb46c99b719" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:01:15 crc kubenswrapper[4647]: I1128 16:01:15.820574 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" 
event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerStarted","Data":"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d"} Nov 28 16:01:16 crc kubenswrapper[4647]: I1128 16:01:16.071843 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:16 crc kubenswrapper[4647]: I1128 16:01:16.071929 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:17 crc kubenswrapper[4647]: I1128 16:01:17.125619 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4n5hk" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="registry-server" probeResult="failure" output=< Nov 28 16:01:17 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:01:17 crc kubenswrapper[4647]: > Nov 28 16:01:21 crc kubenswrapper[4647]: I1128 16:01:21.394300 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:01:21 crc kubenswrapper[4647]: E1128 16:01:21.395176 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:01:26 crc kubenswrapper[4647]: I1128 16:01:26.121829 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:26 crc kubenswrapper[4647]: I1128 16:01:26.145908 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4n5hk" podStartSLOduration=13.947276195 podStartE2EDuration="21.145890542s" podCreationTimestamp="2025-11-28 16:01:05 +0000 UTC" firstStartedPulling="2025-11-28 16:01:07.711377528 +0000 UTC m=+2197.558983959" lastFinishedPulling="2025-11-28 16:01:14.909991875 +0000 UTC m=+2204.757598306" observedRunningTime="2025-11-28 16:01:15.856278417 +0000 UTC m=+2205.703884838" watchObservedRunningTime="2025-11-28 16:01:26.145890542 +0000 UTC m=+2215.993496963" Nov 28 16:01:26 crc kubenswrapper[4647]: I1128 16:01:26.173523 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:27 crc kubenswrapper[4647]: I1128 16:01:27.494167 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:27 crc kubenswrapper[4647]: I1128 16:01:27.953543 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4n5hk" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="registry-server" containerID="cri-o://7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d" gracePeriod=2 Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.489591 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.515984 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities\") pod \"ae281819-940f-4e6a-a0d7-cb42b1261031\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.516035 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content\") pod \"ae281819-940f-4e6a-a0d7-cb42b1261031\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.516126 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lz5z\" (UniqueName: \"kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z\") pod \"ae281819-940f-4e6a-a0d7-cb42b1261031\" (UID: \"ae281819-940f-4e6a-a0d7-cb42b1261031\") " Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.516925 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities" (OuterVolumeSpecName: "utilities") pod "ae281819-940f-4e6a-a0d7-cb42b1261031" (UID: "ae281819-940f-4e6a-a0d7-cb42b1261031"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.519349 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.534706 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z" (OuterVolumeSpecName: "kube-api-access-4lz5z") pod "ae281819-940f-4e6a-a0d7-cb42b1261031" (UID: "ae281819-940f-4e6a-a0d7-cb42b1261031"). InnerVolumeSpecName "kube-api-access-4lz5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.621070 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lz5z\" (UniqueName: \"kubernetes.io/projected/ae281819-940f-4e6a-a0d7-cb42b1261031-kube-api-access-4lz5z\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.643529 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae281819-940f-4e6a-a0d7-cb42b1261031" (UID: "ae281819-940f-4e6a-a0d7-cb42b1261031"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.723747 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae281819-940f-4e6a-a0d7-cb42b1261031-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.963927 4647 generic.go:334] "Generic (PLEG): container finished" podID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerID="7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d" exitCode=0 Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.963967 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerDied","Data":"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d"} Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.963994 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4n5hk" event={"ID":"ae281819-940f-4e6a-a0d7-cb42b1261031","Type":"ContainerDied","Data":"c7092a04e05bd09e298079f4b332e7fe642dcaaf02efdfb297fa68169a079040"} Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.964012 4647 scope.go:117] "RemoveContainer" containerID="7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d" Nov 28 16:01:28 crc kubenswrapper[4647]: I1128 16:01:28.964009 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4n5hk" Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.000633 4647 scope.go:117] "RemoveContainer" containerID="a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74" Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.003502 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.024226 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4n5hk"] Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.024690 4647 scope.go:117] "RemoveContainer" containerID="ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967" Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.081239 4647 scope.go:117] "RemoveContainer" containerID="7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d" Nov 28 16:01:29 crc kubenswrapper[4647]: E1128 16:01:29.081819 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d\": container with ID starting with 7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d not found: ID does not exist" containerID="7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d" Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.081863 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d"} err="failed to get container status \"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d\": rpc error: code = NotFound desc = could not find container \"7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d\": container with ID starting with 7e3540e639a499ff0482f99729ebc33952646d28bb829c09bfb958d1a234c58d not found: ID does not exist" Nov 28 16:01:29 crc 
Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.081890 4647 scope.go:117] "RemoveContainer" containerID="a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74"
Nov 28 16:01:29 crc kubenswrapper[4647]: E1128 16:01:29.082367 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74\": container with ID starting with a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74 not found: ID does not exist" containerID="a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74"
Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.082400 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74"} err="failed to get container status \"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74\": rpc error: code = NotFound desc = could not find container \"a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74\": container with ID starting with a78fb1fe4c06fe48b8d606a7e16274bd7c741610377221d4c94deec0a309ce74 not found: ID does not exist"
Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.082933 4647 scope.go:117] "RemoveContainer" containerID="ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967"
Nov 28 16:01:29 crc kubenswrapper[4647]: E1128 16:01:29.083296 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967\": container with ID starting with ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967 not found: ID does not exist" containerID="ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967"
Nov 28 16:01:29 crc kubenswrapper[4647]: I1128 16:01:29.083317 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967"} err="failed to get container status \"ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967\": rpc error: code = NotFound desc = could not find container \"ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967\": container with ID starting with ecbe27efebf7a1deeee3506447c82b9a02b7c5a4a2e95d4b3f8d40445a863967 not found: ID does not exist"
Nov 28 16:01:30 crc kubenswrapper[4647]: I1128 16:01:30.426254 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" path="/var/lib/kubelet/pods/ae281819-940f-4e6a-a0d7-cb42b1261031/volumes"
Nov 28 16:01:32 crc kubenswrapper[4647]: I1128 16:01:32.396099 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"
Nov 28 16:01:32 crc kubenswrapper[4647]: E1128 16:01:32.397254 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:01:46 crc kubenswrapper[4647]: I1128 16:01:46.394961 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"
Nov 28 16:01:46 crc kubenswrapper[4647]: E1128 16:01:46.395890 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:01:58 crc kubenswrapper[4647]: I1128 16:01:58.394928 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a"
Nov 28 16:01:58 crc kubenswrapper[4647]: E1128 16:01:58.396140 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:02:06 crc kubenswrapper[4647]: I1128 16:02:06.365642 4647 generic.go:334] "Generic (PLEG): container finished" podID="1180d1cb-f9bc-4646-864d-0bdea17fd99f" containerID="d1ccc1e92b84a01ca8d61ef273ea257e7e5bd5e93076add74b673cba3a045144" exitCode=0
Nov 28 16:02:06 crc kubenswrapper[4647]: I1128 16:02:06.365752 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" event={"ID":"1180d1cb-f9bc-4646-864d-0bdea17fd99f","Type":"ContainerDied","Data":"d1ccc1e92b84a01ca8d61ef273ea257e7e5bd5e93076add74b673cba3a045144"}
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.836882 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.836973 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.837006 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7bk4\" (UniqueName: \"kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.838126 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.838156 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.838280 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0\") pod \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\" (UID: \"1180d1cb-f9bc-4646-864d-0bdea17fd99f\") " Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.844898 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.845699 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4" (OuterVolumeSpecName: "kube-api-access-j7bk4") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "kube-api-access-j7bk4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.868361 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.873580 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory" (OuterVolumeSpecName: "inventory") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.878175 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.889273 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "1180d1cb-f9bc-4646-864d-0bdea17fd99f" (UID: "1180d1cb-f9bc-4646-864d-0bdea17fd99f"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.940893 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.941751 4647 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.941901 4647 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.942033 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.942150 4647 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1180d1cb-f9bc-4646-864d-0bdea17fd99f-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:07 crc kubenswrapper[4647]: I1128 16:02:07.942292 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7bk4\" (UniqueName: \"kubernetes.io/projected/1180d1cb-f9bc-4646-864d-0bdea17fd99f-kube-api-access-j7bk4\") on node \"crc\" DevicePath \"\"" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.394794 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.409238 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc" event={"ID":"1180d1cb-f9bc-4646-864d-0bdea17fd99f","Type":"ContainerDied","Data":"2f7680c47ec28be740a0aba165da0481ded3e3edda9daf8294aea1f7d355fb25"} Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.409310 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f7680c47ec28be740a0aba165da0481ded3e3edda9daf8294aea1f7d355fb25" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.551817 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5"] Nov 28 16:02:08 crc kubenswrapper[4647]: E1128 16:02:08.552674 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="extract-utilities" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.564617 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="extract-utilities" Nov 28 16:02:08 crc kubenswrapper[4647]: E1128 16:02:08.564759 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="extract-content" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.564774 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="extract-content" Nov 28 16:02:08 crc kubenswrapper[4647]: E1128 16:02:08.564805 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="registry-server" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.564811 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="registry-server" Nov 28 16:02:08 crc kubenswrapper[4647]: E1128 16:02:08.564825 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1180d1cb-f9bc-4646-864d-0bdea17fd99f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.564835 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="1180d1cb-f9bc-4646-864d-0bdea17fd99f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 16:02:08 crc kubenswrapper[4647]: E1128 16:02:08.564851 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03bc8de1-2028-4b26-bf81-c51d09cf6a71" containerName="keystone-cron" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.564857 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="03bc8de1-2028-4b26-bf81-c51d09cf6a71" containerName="keystone-cron" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.565240 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae281819-940f-4e6a-a0d7-cb42b1261031" containerName="registry-server" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.565262 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="1180d1cb-f9bc-4646-864d-0bdea17fd99f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.565284 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="03bc8de1-2028-4b26-bf81-c51d09cf6a71" containerName="keystone-cron" Nov 28 
16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.566078 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5"] Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.566140 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.569850 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.569879 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.570064 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.570070 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.578008 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.655618 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.655740 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.655763 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtc8p\" (UniqueName: \"kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.655788 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.655826 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc 
kubenswrapper[4647]: I1128 16:02:08.756652 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.757147 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.757178 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtc8p\" (UniqueName: \"kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.757383 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.758382 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.763929 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.765002 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.765087 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.767627 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.781145 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtc8p\" (UniqueName: \"kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:08 crc kubenswrapper[4647]: I1128 16:02:08.891047 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:02:09 crc kubenswrapper[4647]: W1128 16:02:09.468702 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93e3bb5f_ef6f_44de_9f2c_aa13871df572.slice/crio-66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22 WatchSource:0}: Error finding container 66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22: Status 404 returned error can't find the container with id 66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22 Nov 28 16:02:09 crc kubenswrapper[4647]: I1128 16:02:09.475383 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5"] Nov 28 16:02:10 crc kubenswrapper[4647]: I1128 16:02:10.429308 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" event={"ID":"93e3bb5f-ef6f-44de-9f2c-aa13871df572","Type":"ContainerStarted","Data":"ab01d58d388042a2786a8f7610dee5209564f1ab6b4d68f1ba779dcfe865c088"} Nov 28 16:02:10 crc kubenswrapper[4647]: I1128 16:02:10.429714 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" event={"ID":"93e3bb5f-ef6f-44de-9f2c-aa13871df572","Type":"ContainerStarted","Data":"66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22"} Nov 28 16:02:10 crc kubenswrapper[4647]: I1128 16:02:10.461184 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" podStartSLOduration=1.871453713 podStartE2EDuration="2.461165926s" podCreationTimestamp="2025-11-28 16:02:08 +0000 UTC" firstStartedPulling="2025-11-28 16:02:09.47147472 +0000 UTC m=+2259.319081141" lastFinishedPulling="2025-11-28 16:02:10.061186933 +0000 UTC m=+2259.908793354" observedRunningTime="2025-11-28 16:02:10.455036103 +0000 UTC m=+2260.302642534" watchObservedRunningTime="2025-11-28 16:02:10.461165926 +0000 UTC m=+2260.308772347" Nov 28 16:02:11 crc kubenswrapper[4647]: I1128 16:02:11.395333 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:02:11 crc kubenswrapper[4647]: E1128 16:02:11.396123 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:02:25 crc kubenswrapper[4647]: I1128 16:02:25.393999 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:02:25 crc kubenswrapper[4647]: E1128 16:02:25.394861 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:02:39 crc kubenswrapper[4647]: I1128 16:02:39.394809 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:02:39 crc kubenswrapper[4647]: E1128 16:02:39.395927 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:02:50 crc kubenswrapper[4647]: I1128 16:02:50.402954 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:02:50 crc kubenswrapper[4647]: E1128 16:02:50.404300 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:03:01 crc kubenswrapper[4647]: I1128 16:03:01.398035 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:03:01 crc kubenswrapper[4647]: E1128 16:03:01.399194 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:03:16 crc kubenswrapper[4647]: I1128 16:03:16.394784 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:03:16 crc kubenswrapper[4647]: E1128 16:03:16.396005 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:03:28 crc kubenswrapper[4647]: I1128 16:03:28.394861 4647 
scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:03:28 crc kubenswrapper[4647]: E1128 16:03:28.396310 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:03:43 crc kubenswrapper[4647]: I1128 16:03:43.395113 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:03:43 crc kubenswrapper[4647]: E1128 16:03:43.396296 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:03:55 crc kubenswrapper[4647]: I1128 16:03:55.395287 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:03:55 crc kubenswrapper[4647]: E1128 16:03:55.397870 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:04:10 crc kubenswrapper[4647]: I1128 16:04:10.410320 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:04:10 crc kubenswrapper[4647]: E1128 16:04:10.411967 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:04:23 crc kubenswrapper[4647]: I1128 16:04:23.395297 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:04:23 crc kubenswrapper[4647]: E1128 16:04:23.396484 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:04:34 crc kubenswrapper[4647]: I1128 16:04:34.395026 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:04:34 crc kubenswrapper[4647]: E1128 16:04:34.395811 4647 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:04:49 crc kubenswrapper[4647]: I1128 16:04:49.394891 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:04:49 crc kubenswrapper[4647]: E1128 16:04:49.395678 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:05:03 crc kubenswrapper[4647]: I1128 16:05:03.395026 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:05:03 crc kubenswrapper[4647]: E1128 16:05:03.395914 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:05:17 crc kubenswrapper[4647]: I1128 16:05:17.394787 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:05:17 crc kubenswrapper[4647]: E1128 16:05:17.395713 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:05:32 crc kubenswrapper[4647]: I1128 16:05:32.395364 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:05:32 crc kubenswrapper[4647]: E1128 16:05:32.396173 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:05:46 crc kubenswrapper[4647]: I1128 16:05:46.394039 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:05:46 crc kubenswrapper[4647]: E1128 16:05:46.394837 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:05:59 crc kubenswrapper[4647]: I1128 16:05:59.394633 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:06:00 crc kubenswrapper[4647]: I1128 16:06:00.993176 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb"} Nov 28 16:06:16 crc kubenswrapper[4647]: I1128 16:06:16.515910 4647 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6f577f58dc-7rp75" podUID="c5168b52-c295-45d6-aa36-932b5bb95a97" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 28 16:07:38 crc kubenswrapper[4647]: I1128 16:07:38.030046 4647 generic.go:334] "Generic (PLEG): container finished" podID="93e3bb5f-ef6f-44de-9f2c-aa13871df572" containerID="ab01d58d388042a2786a8f7610dee5209564f1ab6b4d68f1ba779dcfe865c088" exitCode=0 Nov 28 16:07:38 crc kubenswrapper[4647]: I1128 16:07:38.030171 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" event={"ID":"93e3bb5f-ef6f-44de-9f2c-aa13871df572","Type":"ContainerDied","Data":"ab01d58d388042a2786a8f7610dee5209564f1ab6b4d68f1ba779dcfe865c088"} Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.478198 4647 util.go:48] "No ready sandbox for pod can be found. 
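
The block of paired "RemoveContainer" / "Error syncing pod, skipping" entries from 16:02:11 through 16:05:46 is the kubelet's crash-loop back-off holding machine-config-daemon at its ceiling: every sync attempt is refused with "back-off 5m0s", and only at 16:05:59-16:06:00 does a restart actually go through (ContainerStarted 300f92cc...). A toy Go sketch of the doubling policy behind that message follows, using the upstream defaults of a 10s base doubling to a 5m cap; it is an illustration, not kubelet code:

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay illustrates the kubelet's container restart back-off:
// a 10s base delay that doubles per consecutive failure, capped at the
// 5m figure seen in the "back-off 5m0s" entries above. Toy sketch only.
func crashLoopDelay(restarts int) time.Duration {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> back-off %s\n", r, crashLoopDelay(r))
	}
}

The printed sequence (10s, 20s, 40s, 1m20s, 2m40s, 5m, 5m) shows why every entry in the run above carries the same 5m0s figure: after five consecutive failures the delay no longer grows.
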
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.566490 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0\") pod \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.566636 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key\") pod \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.566715 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtc8p\" (UniqueName: \"kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p\") pod \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.566788 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle\") pod \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.566871 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory\") pod \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\" (UID: \"93e3bb5f-ef6f-44de-9f2c-aa13871df572\") " Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.573902 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p" (OuterVolumeSpecName: "kube-api-access-xtc8p") pod "93e3bb5f-ef6f-44de-9f2c-aa13871df572" (UID: "93e3bb5f-ef6f-44de-9f2c-aa13871df572"). InnerVolumeSpecName "kube-api-access-xtc8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.589598 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "93e3bb5f-ef6f-44de-9f2c-aa13871df572" (UID: "93e3bb5f-ef6f-44de-9f2c-aa13871df572"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.595098 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "93e3bb5f-ef6f-44de-9f2c-aa13871df572" (UID: "93e3bb5f-ef6f-44de-9f2c-aa13871df572"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.608213 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "93e3bb5f-ef6f-44de-9f2c-aa13871df572" (UID: "93e3bb5f-ef6f-44de-9f2c-aa13871df572"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.633405 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory" (OuterVolumeSpecName: "inventory") pod "93e3bb5f-ef6f-44de-9f2c-aa13871df572" (UID: "93e3bb5f-ef6f-44de-9f2c-aa13871df572"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.669117 4647 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.669155 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.669167 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtc8p\" (UniqueName: \"kubernetes.io/projected/93e3bb5f-ef6f-44de-9f2c-aa13871df572-kube-api-access-xtc8p\") on node \"crc\" DevicePath \"\"" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.669182 4647 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:07:39 crc kubenswrapper[4647]: I1128 16:07:39.669194 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93e3bb5f-ef6f-44de-9f2c-aa13871df572-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.054397 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.054406 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5" event={"ID":"93e3bb5f-ef6f-44de-9f2c-aa13871df572","Type":"ContainerDied","Data":"66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22"} Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.054538 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66c2113f1632c9a6adcd04213e4161882a3a38d9d34445e076ee4f2f1a9fca22" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.274965 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl"] Nov 28 16:07:40 crc kubenswrapper[4647]: E1128 16:07:40.289465 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e3bb5f-ef6f-44de-9f2c-aa13871df572" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.289521 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e3bb5f-ef6f-44de-9f2c-aa13871df572" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.290183 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="93e3bb5f-ef6f-44de-9f2c-aa13871df572" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.291534 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.301116 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.301683 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl"] Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.303111 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.303490 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.303803 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.304086 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.304371 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.304937 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389295 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389363 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389394 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389483 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389619 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389679 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.389967 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.390062 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5khwx\" (UniqueName: \"kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.390082 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492201 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492277 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492323 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492367 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492449 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5khwx\" (UniqueName: \"kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492468 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492505 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492567 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: 
\"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.492598 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.494101 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.499911 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.499930 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.500352 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.502795 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.503299 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.504088 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.504627 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.518859 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5khwx\" (UniqueName: \"kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-p69xl\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:40 crc kubenswrapper[4647]: I1128 16:07:40.658384 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:07:41 crc kubenswrapper[4647]: I1128 16:07:41.263554 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl"] Nov 28 16:07:41 crc kubenswrapper[4647]: I1128 16:07:41.284094 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:07:42 crc kubenswrapper[4647]: I1128 16:07:42.076360 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" event={"ID":"338f9128-79ea-4cda-b4e8-7664e6057225","Type":"ContainerStarted","Data":"6e57f6e2e84750641da337407b81c3a6c49435d2a6364e8a7e9ebc762e1c260e"} Nov 28 16:07:43 crc kubenswrapper[4647]: I1128 16:07:43.098434 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" event={"ID":"338f9128-79ea-4cda-b4e8-7664e6057225","Type":"ContainerStarted","Data":"cf07771af9bb5e721e5a01bddfbb8dda95ff487d18d2496880a33bb7f7d797f8"} Nov 28 16:07:43 crc kubenswrapper[4647]: I1128 16:07:43.138753 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" podStartSLOduration=2.335098198 podStartE2EDuration="3.138724902s" podCreationTimestamp="2025-11-28 16:07:40 +0000 UTC" firstStartedPulling="2025-11-28 16:07:41.283915511 +0000 UTC m=+2591.131521932" lastFinishedPulling="2025-11-28 16:07:42.087542205 +0000 UTC m=+2591.935148636" observedRunningTime="2025-11-28 16:07:43.120706332 +0000 UTC m=+2592.968312753" watchObservedRunningTime="2025-11-28 16:07:43.138724902 +0000 UTC m=+2592.986331363" Nov 28 16:08:17 crc kubenswrapper[4647]: I1128 16:08:17.022492 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:08:17 crc kubenswrapper[4647]: I1128 16:08:17.023063 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 
16:08:27.239343 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.245522 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.265472 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.274621 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.274723 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.274812 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr29d\" (UniqueName: \"kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.376204 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.376662 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr29d\" (UniqueName: \"kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.376832 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.376953 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.377308 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities\") pod 
\"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.398166 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr29d\" (UniqueName: \"kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d\") pod \"redhat-marketplace-zrlbb\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.630738 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:27 crc kubenswrapper[4647]: I1128 16:08:27.996135 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:28 crc kubenswrapper[4647]: I1128 16:08:28.562564 4647 generic.go:334] "Generic (PLEG): container finished" podID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerID="61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234" exitCode=0 Nov 28 16:08:28 crc kubenswrapper[4647]: I1128 16:08:28.562680 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerDied","Data":"61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234"} Nov 28 16:08:28 crc kubenswrapper[4647]: I1128 16:08:28.562922 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerStarted","Data":"11a13087e1b04af471b0e9071e8c679c1b8da056cbf31b70f75782bb675ec6bc"} Nov 28 16:08:31 crc kubenswrapper[4647]: I1128 16:08:31.592770 4647 generic.go:334] "Generic (PLEG): container finished" podID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerID="2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869" exitCode=0 Nov 28 16:08:31 crc kubenswrapper[4647]: I1128 16:08:31.592875 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerDied","Data":"2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869"} Nov 28 16:08:32 crc kubenswrapper[4647]: I1128 16:08:32.613642 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerStarted","Data":"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894"} Nov 28 16:08:32 crc kubenswrapper[4647]: I1128 16:08:32.648221 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zrlbb" podStartSLOduration=3.021272235 podStartE2EDuration="5.648194672s" podCreationTimestamp="2025-11-28 16:08:27 +0000 UTC" firstStartedPulling="2025-11-28 16:08:29.574919565 +0000 UTC m=+2639.422525986" lastFinishedPulling="2025-11-28 16:08:32.201842002 +0000 UTC m=+2642.049448423" observedRunningTime="2025-11-28 16:08:32.647814702 +0000 UTC m=+2642.495421123" watchObservedRunningTime="2025-11-28 16:08:32.648194672 +0000 UTC m=+2642.495801123" Nov 28 16:08:37 crc kubenswrapper[4647]: I1128 16:08:37.631769 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:37 crc 
kubenswrapper[4647]: I1128 16:08:37.632577 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:37 crc kubenswrapper[4647]: I1128 16:08:37.703638 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:37 crc kubenswrapper[4647]: I1128 16:08:37.761760 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:37 crc kubenswrapper[4647]: I1128 16:08:37.955350 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:39 crc kubenswrapper[4647]: I1128 16:08:39.701358 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zrlbb" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="registry-server" containerID="cri-o://b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894" gracePeriod=2 Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.249713 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.381888 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content\") pod \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.382621 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr29d\" (UniqueName: \"kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d\") pod \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.382924 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities\") pod \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\" (UID: \"45d5cd14-1313-40b6-a4b7-79cbe8d3d289\") " Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.383818 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities" (OuterVolumeSpecName: "utilities") pod "45d5cd14-1313-40b6-a4b7-79cbe8d3d289" (UID: "45d5cd14-1313-40b6-a4b7-79cbe8d3d289"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.384542 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.402173 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45d5cd14-1313-40b6-a4b7-79cbe8d3d289" (UID: "45d5cd14-1313-40b6-a4b7-79cbe8d3d289"). InnerVolumeSpecName "catalog-content". 
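
gracePeriod=2 in the "Killing container with a grace period" entry just above is the window the registry-server container gets at delete time: SIGTERM first, SIGKILL if it outlives the window. Here is a process-level Go sketch of the same pattern; it assumes a Unix-like host with a sleep binary and illustrates the semantics rather than CRI-O's implementation:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGracePeriod mimics the grace-period kill above at the process
// level: SIGTERM, then SIGKILL once the grace period expires.
func killWithGracePeriod(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace period expired: SIGKILL
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60") // exits promptly on SIGTERM
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	start := time.Now()
	_ = killWithGracePeriod(cmd, 2*time.Second) // 2s, matching gracePeriod=2
	fmt.Printf("process gone after %s\n", time.Since(start).Round(time.Millisecond))
}
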
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.405898 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d" (OuterVolumeSpecName: "kube-api-access-xr29d") pod "45d5cd14-1313-40b6-a4b7-79cbe8d3d289" (UID: "45d5cd14-1313-40b6-a4b7-79cbe8d3d289"). InnerVolumeSpecName "kube-api-access-xr29d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.487380 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.487470 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr29d\" (UniqueName: \"kubernetes.io/projected/45d5cd14-1313-40b6-a4b7-79cbe8d3d289-kube-api-access-xr29d\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.717355 4647 generic.go:334] "Generic (PLEG): container finished" podID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerID="b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894" exitCode=0 Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.717462 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerDied","Data":"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894"} Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.717507 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zrlbb" event={"ID":"45d5cd14-1313-40b6-a4b7-79cbe8d3d289","Type":"ContainerDied","Data":"11a13087e1b04af471b0e9071e8c679c1b8da056cbf31b70f75782bb675ec6bc"} Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.717534 4647 scope.go:117] "RemoveContainer" containerID="b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.717732 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zrlbb" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.749702 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.752724 4647 scope.go:117] "RemoveContainer" containerID="2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.761875 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zrlbb"] Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.780283 4647 scope.go:117] "RemoveContainer" containerID="61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.829841 4647 scope.go:117] "RemoveContainer" containerID="b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894" Nov 28 16:08:40 crc kubenswrapper[4647]: E1128 16:08:40.830429 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894\": container with ID starting with b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894 not found: ID does not exist" containerID="b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.830467 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894"} err="failed to get container status \"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894\": rpc error: code = NotFound desc = could not find container \"b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894\": container with ID starting with b2d3765b60f7053836727990bd52b28f10f97ad3d6a7dc180172c148fd5d4894 not found: ID does not exist" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.830494 4647 scope.go:117] "RemoveContainer" containerID="2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869" Nov 28 16:08:40 crc kubenswrapper[4647]: E1128 16:08:40.834271 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869\": container with ID starting with 2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869 not found: ID does not exist" containerID="2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.834301 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869"} err="failed to get container status \"2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869\": rpc error: code = NotFound desc = could not find container \"2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869\": container with ID starting with 2b7e30effb166bd64a4d033f9887442232e189f060ac9a8b76bae1bf94ab5869 not found: ID does not exist" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.834320 4647 scope.go:117] "RemoveContainer" containerID="61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234" Nov 28 16:08:40 crc kubenswrapper[4647]: E1128 16:08:40.836219 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234\": container with ID starting with 61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234 not found: ID does not exist" containerID="61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234" Nov 28 16:08:40 crc kubenswrapper[4647]: I1128 16:08:40.836247 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234"} err="failed to get container status \"61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234\": rpc error: code = NotFound desc = could not find container \"61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234\": container with ID starting with 61b2d23199a8f38de4dd61348ef04cc5231fdebc2944de9ce8670e12ad0e3234 not found: ID does not exist" Nov 28 16:08:42 crc kubenswrapper[4647]: I1128 16:08:42.409334 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" path="/var/lib/kubelet/pods/45d5cd14-1313-40b6-a4b7-79cbe8d3d289/volumes" Nov 28 16:08:47 crc kubenswrapper[4647]: I1128 16:08:47.022745 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:08:47 crc kubenswrapper[4647]: I1128 16:08:47.023539 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:09:17 crc kubenswrapper[4647]: I1128 16:09:17.022869 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:09:17 crc kubenswrapper[4647]: I1128 16:09:17.023420 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:09:17 crc kubenswrapper[4647]: I1128 16:09:17.023477 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:09:17 crc kubenswrapper[4647]: I1128 16:09:17.024223 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:09:17 crc kubenswrapper[4647]: I1128 16:09:17.024280 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" 
podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb" gracePeriod=600 Nov 28 16:09:18 crc kubenswrapper[4647]: I1128 16:09:18.115850 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb" exitCode=0 Nov 28 16:09:18 crc kubenswrapper[4647]: I1128 16:09:18.116010 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb"} Nov 28 16:09:18 crc kubenswrapper[4647]: I1128 16:09:18.116520 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4"} Nov 28 16:09:18 crc kubenswrapper[4647]: I1128 16:09:18.116552 4647 scope.go:117] "RemoveContainer" containerID="235e2e25bb41871832f5ee5026fc51f730cd0ba31c80c025c46eaa1ebd613e3a" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.223623 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:11 crc kubenswrapper[4647]: E1128 16:10:11.224723 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="extract-content" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.224739 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="extract-content" Nov 28 16:10:11 crc kubenswrapper[4647]: E1128 16:10:11.224778 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="extract-utilities" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.224787 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="extract-utilities" Nov 28 16:10:11 crc kubenswrapper[4647]: E1128 16:10:11.224807 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="registry-server" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.224815 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="registry-server" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.225070 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d5cd14-1313-40b6-a4b7-79cbe8d3d289" containerName="registry-server" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.226708 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.241356 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.412989 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.413750 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.413845 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-472m9\" (UniqueName: \"kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.516491 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.516534 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-472m9\" (UniqueName: \"kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.516945 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.517058 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.517347 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.541570 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-472m9\" (UniqueName: \"kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9\") pod \"certified-operators-zrgh8\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.566146 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:11 crc kubenswrapper[4647]: I1128 16:10:11.962140 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:12 crc kubenswrapper[4647]: I1128 16:10:12.675538 4647 generic.go:334] "Generic (PLEG): container finished" podID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerID="ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757" exitCode=0 Nov 28 16:10:12 crc kubenswrapper[4647]: I1128 16:10:12.675649 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerDied","Data":"ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757"} Nov 28 16:10:12 crc kubenswrapper[4647]: I1128 16:10:12.675965 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerStarted","Data":"ca890d5978e66736c6b7e68775c2e9f4ba6e35c9ce11820d991a20f66010c258"} Nov 28 16:10:14 crc kubenswrapper[4647]: I1128 16:10:14.718277 4647 generic.go:334] "Generic (PLEG): container finished" podID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerID="31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5" exitCode=0 Nov 28 16:10:14 crc kubenswrapper[4647]: I1128 16:10:14.718463 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerDied","Data":"31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5"} Nov 28 16:10:15 crc kubenswrapper[4647]: I1128 16:10:15.732115 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerStarted","Data":"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e"} Nov 28 16:10:15 crc kubenswrapper[4647]: I1128 16:10:15.756384 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zrgh8" podStartSLOduration=2.223286761 podStartE2EDuration="4.756367022s" podCreationTimestamp="2025-11-28 16:10:11 +0000 UTC" firstStartedPulling="2025-11-28 16:10:12.677271209 +0000 UTC m=+2742.524877630" lastFinishedPulling="2025-11-28 16:10:15.21035143 +0000 UTC m=+2745.057957891" observedRunningTime="2025-11-28 16:10:15.752760936 +0000 UTC m=+2745.600367357" watchObservedRunningTime="2025-11-28 16:10:15.756367022 +0000 UTC m=+2745.603973443" Nov 28 16:10:21 crc kubenswrapper[4647]: I1128 16:10:21.566676 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:21 crc kubenswrapper[4647]: I1128 16:10:21.568316 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:21 crc kubenswrapper[4647]: I1128 16:10:21.645322 4647 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:21 crc kubenswrapper[4647]: I1128 16:10:21.854118 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:21 crc kubenswrapper[4647]: I1128 16:10:21.903857 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:23 crc kubenswrapper[4647]: I1128 16:10:23.803882 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zrgh8" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="registry-server" containerID="cri-o://d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e" gracePeriod=2 Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.356250 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.438979 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-472m9\" (UniqueName: \"kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9\") pod \"d5dc1206-ee34-4a09-9487-705b31ccdea3\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.439061 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities\") pod \"d5dc1206-ee34-4a09-9487-705b31ccdea3\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.439176 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content\") pod \"d5dc1206-ee34-4a09-9487-705b31ccdea3\" (UID: \"d5dc1206-ee34-4a09-9487-705b31ccdea3\") " Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.440856 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities" (OuterVolumeSpecName: "utilities") pod "d5dc1206-ee34-4a09-9487-705b31ccdea3" (UID: "d5dc1206-ee34-4a09-9487-705b31ccdea3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.448906 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9" (OuterVolumeSpecName: "kube-api-access-472m9") pod "d5dc1206-ee34-4a09-9487-705b31ccdea3" (UID: "d5dc1206-ee34-4a09-9487-705b31ccdea3"). InnerVolumeSpecName "kube-api-access-472m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.495212 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5dc1206-ee34-4a09-9487-705b31ccdea3" (UID: "d5dc1206-ee34-4a09-9487-705b31ccdea3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.541982 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-472m9\" (UniqueName: \"kubernetes.io/projected/d5dc1206-ee34-4a09-9487-705b31ccdea3-kube-api-access-472m9\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.542018 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.542042 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5dc1206-ee34-4a09-9487-705b31ccdea3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.817929 4647 generic.go:334] "Generic (PLEG): container finished" podID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerID="d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e" exitCode=0 Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.818008 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerDied","Data":"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e"} Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.818048 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrgh8" event={"ID":"d5dc1206-ee34-4a09-9487-705b31ccdea3","Type":"ContainerDied","Data":"ca890d5978e66736c6b7e68775c2e9f4ba6e35c9ce11820d991a20f66010c258"} Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.818070 4647 scope.go:117] "RemoveContainer" containerID="d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.819168 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrgh8" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.872014 4647 scope.go:117] "RemoveContainer" containerID="31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.896263 4647 scope.go:117] "RemoveContainer" containerID="ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.900114 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.910263 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zrgh8"] Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.948074 4647 scope.go:117] "RemoveContainer" containerID="d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e" Nov 28 16:10:24 crc kubenswrapper[4647]: E1128 16:10:24.948762 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e\": container with ID starting with d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e not found: ID does not exist" containerID="d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.948881 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e"} err="failed to get container status \"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e\": rpc error: code = NotFound desc = could not find container \"d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e\": container with ID starting with d4778768d2193fa61c2c6229abfaf4dfc0b2006077a67c90e779f3d245d4b21e not found: ID does not exist" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.948973 4647 scope.go:117] "RemoveContainer" containerID="31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5" Nov 28 16:10:24 crc kubenswrapper[4647]: E1128 16:10:24.949368 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5\": container with ID starting with 31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5 not found: ID does not exist" containerID="31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.949427 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5"} err="failed to get container status \"31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5\": rpc error: code = NotFound desc = could not find container \"31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5\": container with ID starting with 31d2d96634c4c63c604fde28818a69f886f324807a526c0ad226c78f09738ea5 not found: ID does not exist" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.949456 4647 scope.go:117] "RemoveContainer" containerID="ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757" Nov 28 16:10:24 crc kubenswrapper[4647]: E1128 16:10:24.950854 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757\": container with ID starting with ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757 not found: ID does not exist" containerID="ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757" Nov 28 16:10:24 crc kubenswrapper[4647]: I1128 16:10:24.950894 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757"} err="failed to get container status \"ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757\": rpc error: code = NotFound desc = could not find container \"ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757\": container with ID starting with ac80d8dce504ed0050e3d0ce8ff436f38c24b665045ad2f749fd607b89a0c757 not found: ID does not exist" Nov 28 16:10:26 crc kubenswrapper[4647]: I1128 16:10:26.417528 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" path="/var/lib/kubelet/pods/d5dc1206-ee34-4a09-9487-705b31ccdea3/volumes" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.784001 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:10:47 crc kubenswrapper[4647]: E1128 16:10:47.785156 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="registry-server" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.785174 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="registry-server" Nov 28 16:10:47 crc kubenswrapper[4647]: E1128 16:10:47.785198 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="extract-content" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.785205 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="extract-content" Nov 28 16:10:47 crc kubenswrapper[4647]: E1128 16:10:47.785227 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="extract-utilities" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.785235 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="extract-utilities" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.785513 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5dc1206-ee34-4a09-9487-705b31ccdea3" containerName="registry-server" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.787400 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.795718 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.911060 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.911124 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:47 crc kubenswrapper[4647]: I1128 16:10:47.911168 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2s4f\" (UniqueName: \"kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.013143 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2s4f\" (UniqueName: \"kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.013343 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.013379 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.014009 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.014079 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.035982 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d2s4f\" (UniqueName: \"kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f\") pod \"community-operators-48tgm\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.138305 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:48 crc kubenswrapper[4647]: I1128 16:10:48.721167 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:10:49 crc kubenswrapper[4647]: I1128 16:10:49.149216 4647 generic.go:334] "Generic (PLEG): container finished" podID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerID="f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514" exitCode=0 Nov 28 16:10:49 crc kubenswrapper[4647]: I1128 16:10:49.149296 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerDied","Data":"f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514"} Nov 28 16:10:49 crc kubenswrapper[4647]: I1128 16:10:49.149573 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerStarted","Data":"e80f170a82d51ec26ad785f41d37b1bca5167d35325b00b7e7211d1e4dcd4935"} Nov 28 16:10:51 crc kubenswrapper[4647]: I1128 16:10:51.173770 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerStarted","Data":"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471"} Nov 28 16:10:53 crc kubenswrapper[4647]: I1128 16:10:53.205378 4647 generic.go:334] "Generic (PLEG): container finished" podID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerID="39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471" exitCode=0 Nov 28 16:10:53 crc kubenswrapper[4647]: I1128 16:10:53.205538 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerDied","Data":"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471"} Nov 28 16:10:55 crc kubenswrapper[4647]: I1128 16:10:55.230173 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerStarted","Data":"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6"} Nov 28 16:10:55 crc kubenswrapper[4647]: I1128 16:10:55.260204 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-48tgm" podStartSLOduration=3.051407719 podStartE2EDuration="8.260174892s" podCreationTimestamp="2025-11-28 16:10:47 +0000 UTC" firstStartedPulling="2025-11-28 16:10:49.151538745 +0000 UTC m=+2778.999145176" lastFinishedPulling="2025-11-28 16:10:54.360305928 +0000 UTC m=+2784.207912349" observedRunningTime="2025-11-28 16:10:55.251298035 +0000 UTC m=+2785.098904456" watchObservedRunningTime="2025-11-28 16:10:55.260174892 +0000 UTC m=+2785.107781313" Nov 28 16:10:58 crc kubenswrapper[4647]: I1128 16:10:58.139558 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:58 crc kubenswrapper[4647]: I1128 16:10:58.140983 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:10:58 crc kubenswrapper[4647]: I1128 16:10:58.233933 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:11:07 crc kubenswrapper[4647]: I1128 16:11:07.345212 4647 generic.go:334] "Generic (PLEG): container finished" podID="338f9128-79ea-4cda-b4e8-7664e6057225" containerID="cf07771af9bb5e721e5a01bddfbb8dda95ff487d18d2496880a33bb7f7d797f8" exitCode=0 Nov 28 16:11:07 crc kubenswrapper[4647]: I1128 16:11:07.345318 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" event={"ID":"338f9128-79ea-4cda-b4e8-7664e6057225","Type":"ContainerDied","Data":"cf07771af9bb5e721e5a01bddfbb8dda95ff487d18d2496880a33bb7f7d797f8"} Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.182659 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.224392 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.355351 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-48tgm" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="registry-server" containerID="cri-o://b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6" gracePeriod=2 Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.865843 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981744 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981789 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981853 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981882 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981943 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5khwx\" (UniqueName: \"kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.981991 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.982055 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.982103 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.982166 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0\") pod \"338f9128-79ea-4cda-b4e8-7664e6057225\" (UID: \"338f9128-79ea-4cda-b4e8-7664e6057225\") " Nov 28 16:11:08 crc kubenswrapper[4647]: I1128 16:11:08.990790 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.005955 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx" (OuterVolumeSpecName: "kube-api-access-5khwx") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "kube-api-access-5khwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.034165 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.040357 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory" (OuterVolumeSpecName: "inventory") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.044966 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.057130 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.068098 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084610 4647 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084639 4647 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084649 4647 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084659 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084667 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5khwx\" (UniqueName: \"kubernetes.io/projected/338f9128-79ea-4cda-b4e8-7664e6057225-kube-api-access-5khwx\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.084677 4647 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/338f9128-79ea-4cda-b4e8-7664e6057225-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.102549 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.104148 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.118534 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "338f9128-79ea-4cda-b4e8-7664e6057225" (UID: "338f9128-79ea-4cda-b4e8-7664e6057225"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.185637 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities\") pod \"5683758b-47c5-4cfc-bae1-6d609997ff31\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.185797 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2s4f\" (UniqueName: \"kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f\") pod \"5683758b-47c5-4cfc-bae1-6d609997ff31\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.185850 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content\") pod \"5683758b-47c5-4cfc-bae1-6d609997ff31\" (UID: \"5683758b-47c5-4cfc-bae1-6d609997ff31\") " Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.186328 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.186345 4647 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.186356 4647 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/338f9128-79ea-4cda-b4e8-7664e6057225-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.186568 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities" (OuterVolumeSpecName: "utilities") pod "5683758b-47c5-4cfc-bae1-6d609997ff31" (UID: "5683758b-47c5-4cfc-bae1-6d609997ff31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.190305 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f" (OuterVolumeSpecName: "kube-api-access-d2s4f") pod "5683758b-47c5-4cfc-bae1-6d609997ff31" (UID: "5683758b-47c5-4cfc-bae1-6d609997ff31"). InnerVolumeSpecName "kube-api-access-d2s4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.245056 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5683758b-47c5-4cfc-bae1-6d609997ff31" (UID: "5683758b-47c5-4cfc-bae1-6d609997ff31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.287782 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.287822 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2s4f\" (UniqueName: \"kubernetes.io/projected/5683758b-47c5-4cfc-bae1-6d609997ff31-kube-api-access-d2s4f\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.287834 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5683758b-47c5-4cfc-bae1-6d609997ff31-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.364881 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" event={"ID":"338f9128-79ea-4cda-b4e8-7664e6057225","Type":"ContainerDied","Data":"6e57f6e2e84750641da337407b81c3a6c49435d2a6364e8a7e9ebc762e1c260e"} Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.364939 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e57f6e2e84750641da337407b81c3a6c49435d2a6364e8a7e9ebc762e1c260e" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.366202 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-p69xl" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.370739 4647 generic.go:334] "Generic (PLEG): container finished" podID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerID="b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6" exitCode=0 Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.370790 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerDied","Data":"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6"} Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.370820 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-48tgm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.370839 4647 scope.go:117] "RemoveContainer" containerID="b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.370828 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48tgm" event={"ID":"5683758b-47c5-4cfc-bae1-6d609997ff31","Type":"ContainerDied","Data":"e80f170a82d51ec26ad785f41d37b1bca5167d35325b00b7e7211d1e4dcd4935"} Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.395875 4647 scope.go:117] "RemoveContainer" containerID="39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.420803 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.434117 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-48tgm"] Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.439750 4647 scope.go:117] "RemoveContainer" containerID="f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.458430 4647 scope.go:117] "RemoveContainer" containerID="b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.459009 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6\": container with ID starting with b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6 not found: ID does not exist" containerID="b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.459048 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6"} err="failed to get container status \"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6\": rpc error: code = NotFound desc = could not find container \"b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6\": container with ID starting with b7555f658caffdbe3fa1710735a7e205147991cf8f865f46f6f18ae22161adc6 not found: ID does not exist" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.459075 4647 scope.go:117] "RemoveContainer" containerID="39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.459402 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471\": container with ID starting with 39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471 not found: ID does not exist" containerID="39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.459489 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471"} err="failed to get container status \"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471\": rpc error: code = NotFound desc = could not find 
container \"39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471\": container with ID starting with 39f79583a5816892a39caf8a7d1a29936ba676bce7b7e447c3ec9bed7117e471 not found: ID does not exist" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.459501 4647 scope.go:117] "RemoveContainer" containerID="f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.459770 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514\": container with ID starting with f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514 not found: ID does not exist" containerID="f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.459815 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514"} err="failed to get container status \"f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514\": rpc error: code = NotFound desc = could not find container \"f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514\": container with ID starting with f089de9b828858f8528e0430ec1ad9a89dd2bf5cdebfd828888889a36501e514 not found: ID does not exist" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.588730 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm"] Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.589711 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="registry-server" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.589837 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="registry-server" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.589925 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="extract-utilities" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.589984 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="extract-utilities" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.590052 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338f9128-79ea-4cda-b4e8-7664e6057225" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.590115 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="338f9128-79ea-4cda-b4e8-7664e6057225" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 28 16:11:09 crc kubenswrapper[4647]: E1128 16:11:09.590184 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="extract-content" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.590262 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="extract-content" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.590615 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="338f9128-79ea-4cda-b4e8-7664e6057225" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 
16:11:09.590704 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" containerName="registry-server" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.591585 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.594832 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.595091 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-78nk6" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.595301 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.597074 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.597970 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.610803 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm"] Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.704919 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.704969 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.705134 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.705200 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56b6v\" (UniqueName: \"kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.705250 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.705361 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.705396 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807268 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807342 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807384 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807478 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807497 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: 
I1128 16:11:09.807550 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.807572 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56b6v\" (UniqueName: \"kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.811738 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.812291 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.812778 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.823951 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.824207 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.838992 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.839080 4647 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56b6v\" (UniqueName: \"kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wghtm\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:09 crc kubenswrapper[4647]: I1128 16:11:09.922406 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:11:10 crc kubenswrapper[4647]: I1128 16:11:10.413938 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5683758b-47c5-4cfc-bae1-6d609997ff31" path="/var/lib/kubelet/pods/5683758b-47c5-4cfc-bae1-6d609997ff31/volumes" Nov 28 16:11:10 crc kubenswrapper[4647]: I1128 16:11:10.573726 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm"] Nov 28 16:11:11 crc kubenswrapper[4647]: I1128 16:11:11.435370 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" event={"ID":"c1d8e071-fad7-4b8d-8637-e7be304c4c86","Type":"ContainerStarted","Data":"038d8ed93a51e17066572494f08f17cff63cdd4d0e2ccafa192916eef4cf5afa"} Nov 28 16:11:12 crc kubenswrapper[4647]: I1128 16:11:12.446455 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" event={"ID":"c1d8e071-fad7-4b8d-8637-e7be304c4c86","Type":"ContainerStarted","Data":"891cead96ba6492d3a98c5cbd64b4dec47f867b3c2078a87bf8e1901230c0588"} Nov 28 16:11:12 crc kubenswrapper[4647]: I1128 16:11:12.477959 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" podStartSLOduration=2.7548489959999998 podStartE2EDuration="3.477922947s" podCreationTimestamp="2025-11-28 16:11:09 +0000 UTC" firstStartedPulling="2025-11-28 16:11:10.582877826 +0000 UTC m=+2800.430484247" lastFinishedPulling="2025-11-28 16:11:11.305951767 +0000 UTC m=+2801.153558198" observedRunningTime="2025-11-28 16:11:12.473213662 +0000 UTC m=+2802.320820093" watchObservedRunningTime="2025-11-28 16:11:12.477922947 +0000 UTC m=+2802.325529368" Nov 28 16:11:17 crc kubenswrapper[4647]: I1128 16:11:17.022838 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:11:17 crc kubenswrapper[4647]: I1128 16:11:17.023121 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.477822 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.481242 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.488511 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.661167 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4mwr\" (UniqueName: \"kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.661899 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.661948 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.764932 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4mwr\" (UniqueName: \"kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.765079 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.765117 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.765866 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.766695 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.789272 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-b4mwr\" (UniqueName: \"kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr\") pod \"redhat-operators-9q86k\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:37 crc kubenswrapper[4647]: I1128 16:11:37.821659 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:38 crc kubenswrapper[4647]: I1128 16:11:38.359589 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:11:38 crc kubenswrapper[4647]: I1128 16:11:38.739190 4647 generic.go:334] "Generic (PLEG): container finished" podID="649b4906-6200-441d-a3fd-c6088a4df300" containerID="89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454" exitCode=0 Nov 28 16:11:38 crc kubenswrapper[4647]: I1128 16:11:38.739379 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerDied","Data":"89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454"} Nov 28 16:11:38 crc kubenswrapper[4647]: I1128 16:11:38.739452 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerStarted","Data":"04d731e67bc7a49c2fa66532d09de960260b35727bb067a9e3a507e9852189e9"} Nov 28 16:11:40 crc kubenswrapper[4647]: I1128 16:11:40.767156 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerStarted","Data":"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f"} Nov 28 16:11:43 crc kubenswrapper[4647]: I1128 16:11:43.797962 4647 generic.go:334] "Generic (PLEG): container finished" podID="649b4906-6200-441d-a3fd-c6088a4df300" containerID="947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f" exitCode=0 Nov 28 16:11:43 crc kubenswrapper[4647]: I1128 16:11:43.798065 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerDied","Data":"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f"} Nov 28 16:11:44 crc kubenswrapper[4647]: I1128 16:11:44.810540 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerStarted","Data":"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7"} Nov 28 16:11:44 crc kubenswrapper[4647]: I1128 16:11:44.845272 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9q86k" podStartSLOduration=2.31878448 podStartE2EDuration="7.845254964s" podCreationTimestamp="2025-11-28 16:11:37 +0000 UTC" firstStartedPulling="2025-11-28 16:11:38.74165056 +0000 UTC m=+2828.589256981" lastFinishedPulling="2025-11-28 16:11:44.268121024 +0000 UTC m=+2834.115727465" observedRunningTime="2025-11-28 16:11:44.838059842 +0000 UTC m=+2834.685666303" watchObservedRunningTime="2025-11-28 16:11:44.845254964 +0000 UTC m=+2834.692861385" Nov 28 16:11:47 crc kubenswrapper[4647]: I1128 16:11:47.022914 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:11:47 crc kubenswrapper[4647]: I1128 16:11:47.023300 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:11:47 crc kubenswrapper[4647]: I1128 16:11:47.823138 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:47 crc kubenswrapper[4647]: I1128 16:11:47.823625 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:48 crc kubenswrapper[4647]: I1128 16:11:48.886847 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9q86k" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="registry-server" probeResult="failure" output=< Nov 28 16:11:48 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:11:48 crc kubenswrapper[4647]: > Nov 28 16:11:57 crc kubenswrapper[4647]: I1128 16:11:57.903603 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:57 crc kubenswrapper[4647]: I1128 16:11:57.997126 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:58 crc kubenswrapper[4647]: I1128 16:11:58.155096 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:11:58 crc kubenswrapper[4647]: I1128 16:11:58.962669 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9q86k" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="registry-server" containerID="cri-o://e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7" gracePeriod=2 Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.420434 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.466183 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4mwr\" (UniqueName: \"kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr\") pod \"649b4906-6200-441d-a3fd-c6088a4df300\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.466390 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities\") pod \"649b4906-6200-441d-a3fd-c6088a4df300\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.466475 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content\") pod \"649b4906-6200-441d-a3fd-c6088a4df300\" (UID: \"649b4906-6200-441d-a3fd-c6088a4df300\") " Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.467323 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities" (OuterVolumeSpecName: "utilities") pod "649b4906-6200-441d-a3fd-c6088a4df300" (UID: "649b4906-6200-441d-a3fd-c6088a4df300"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.475438 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr" (OuterVolumeSpecName: "kube-api-access-b4mwr") pod "649b4906-6200-441d-a3fd-c6088a4df300" (UID: "649b4906-6200-441d-a3fd-c6088a4df300"). InnerVolumeSpecName "kube-api-access-b4mwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.568930 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.569232 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4mwr\" (UniqueName: \"kubernetes.io/projected/649b4906-6200-441d-a3fd-c6088a4df300-kube-api-access-b4mwr\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.589952 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "649b4906-6200-441d-a3fd-c6088a4df300" (UID: "649b4906-6200-441d-a3fd-c6088a4df300"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.670722 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/649b4906-6200-441d-a3fd-c6088a4df300-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.975667 4647 generic.go:334] "Generic (PLEG): container finished" podID="649b4906-6200-441d-a3fd-c6088a4df300" containerID="e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7" exitCode=0 Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.975803 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerDied","Data":"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7"} Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.976585 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9q86k" event={"ID":"649b4906-6200-441d-a3fd-c6088a4df300","Type":"ContainerDied","Data":"04d731e67bc7a49c2fa66532d09de960260b35727bb067a9e3a507e9852189e9"} Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.976627 4647 scope.go:117] "RemoveContainer" containerID="e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7" Nov 28 16:11:59 crc kubenswrapper[4647]: I1128 16:11:59.975816 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9q86k" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.008699 4647 scope.go:117] "RemoveContainer" containerID="947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.037007 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.040685 4647 scope.go:117] "RemoveContainer" containerID="89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.051352 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9q86k"] Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.078195 4647 scope.go:117] "RemoveContainer" containerID="e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7" Nov 28 16:12:00 crc kubenswrapper[4647]: E1128 16:12:00.078716 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7\": container with ID starting with e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7 not found: ID does not exist" containerID="e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.078788 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7"} err="failed to get container status \"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7\": rpc error: code = NotFound desc = could not find container \"e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7\": container with ID starting with e4cac9312509dc7f4212c35e34cbe39ba7f32fa49e9b9d6300d1cd1d7d6d80a7 not found: ID does not exist" Nov 28 16:12:00 crc 
kubenswrapper[4647]: I1128 16:12:00.078825 4647 scope.go:117] "RemoveContainer" containerID="947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f" Nov 28 16:12:00 crc kubenswrapper[4647]: E1128 16:12:00.079204 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f\": container with ID starting with 947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f not found: ID does not exist" containerID="947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.079235 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f"} err="failed to get container status \"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f\": rpc error: code = NotFound desc = could not find container \"947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f\": container with ID starting with 947183b80bb6397e4ba1baf6379e652745e37442d88e98f0c9b7e08d8bf3851f not found: ID does not exist" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.079254 4647 scope.go:117] "RemoveContainer" containerID="89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454" Nov 28 16:12:00 crc kubenswrapper[4647]: E1128 16:12:00.079527 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454\": container with ID starting with 89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454 not found: ID does not exist" containerID="89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.079566 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454"} err="failed to get container status \"89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454\": rpc error: code = NotFound desc = could not find container \"89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454\": container with ID starting with 89dfab1aec13fa9f34d30566aa4dde8e280da95f7878f7660bc73f6205467454 not found: ID does not exist" Nov 28 16:12:00 crc kubenswrapper[4647]: I1128 16:12:00.405441 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="649b4906-6200-441d-a3fd-c6088a4df300" path="/var/lib/kubelet/pods/649b4906-6200-441d-a3fd-c6088a4df300/volumes" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.022762 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.024684 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.024782 4647 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.026005 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.026082 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" gracePeriod=600 Nov 28 16:12:17 crc kubenswrapper[4647]: E1128 16:12:17.166495 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.189917 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" exitCode=0 Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.189983 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4"} Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.190032 4647 scope.go:117] "RemoveContainer" containerID="300f92cccae17a8a17699dc960ff9faee3713835016c8656160147ae4942fdeb" Nov 28 16:12:17 crc kubenswrapper[4647]: I1128 16:12:17.190830 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:12:17 crc kubenswrapper[4647]: E1128 16:12:17.191197 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:12:17 crc kubenswrapper[4647]: E1128 16:12:17.291672 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod008f163b_b2fe_4238_90b5_96f0d89f3fb5.slice/crio-conmon-3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:12:32 crc kubenswrapper[4647]: I1128 16:12:32.397709 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:12:32 crc kubenswrapper[4647]: E1128 16:12:32.399200 4647 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:12:46 crc kubenswrapper[4647]: I1128 16:12:46.394949 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:12:46 crc kubenswrapper[4647]: E1128 16:12:46.396277 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:13:01 crc kubenswrapper[4647]: I1128 16:13:01.395325 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:13:01 crc kubenswrapper[4647]: E1128 16:13:01.396481 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:13:16 crc kubenswrapper[4647]: I1128 16:13:16.395193 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:13:16 crc kubenswrapper[4647]: E1128 16:13:16.396231 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:13:31 crc kubenswrapper[4647]: I1128 16:13:31.394814 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:13:31 crc kubenswrapper[4647]: E1128 16:13:31.395628 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:13:44 crc kubenswrapper[4647]: I1128 16:13:44.395145 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:13:44 crc kubenswrapper[4647]: E1128 16:13:44.396425 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:13:55 crc kubenswrapper[4647]: I1128 16:13:55.394803 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:13:55 crc kubenswrapper[4647]: E1128 16:13:55.395600 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:14:08 crc kubenswrapper[4647]: I1128 16:14:08.395367 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:14:08 crc kubenswrapper[4647]: E1128 16:14:08.396275 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:14:23 crc kubenswrapper[4647]: I1128 16:14:23.394195 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:14:23 crc kubenswrapper[4647]: E1128 16:14:23.395222 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:14:35 crc kubenswrapper[4647]: I1128 16:14:35.394099 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:14:35 crc kubenswrapper[4647]: E1128 16:14:35.394834 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:14:46 crc kubenswrapper[4647]: I1128 16:14:46.394888 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:14:46 crc kubenswrapper[4647]: E1128 16:14:46.395612 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" 
podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:14:59 crc kubenswrapper[4647]: I1128 16:14:59.395594 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:14:59 crc kubenswrapper[4647]: E1128 16:14:59.396537 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.163382 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj"] Nov 28 16:15:00 crc kubenswrapper[4647]: E1128 16:15:00.163905 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.163921 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4647]: E1128 16:15:00.163935 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="extract-utilities" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.163944 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="extract-utilities" Nov 28 16:15:00 crc kubenswrapper[4647]: E1128 16:15:00.163979 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="extract-content" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.163987 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="extract-content" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.164215 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="649b4906-6200-441d-a3fd-c6088a4df300" containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.165123 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.168530 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.168611 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.203514 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.203677 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.203784 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8vm9\" (UniqueName: \"kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.206696 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj"] Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.305138 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8vm9\" (UniqueName: \"kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.306015 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.306478 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.307337 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume\") pod 
\"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.316476 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.336583 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8vm9\" (UniqueName: \"kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9\") pod \"collect-profiles-29405775-9dzkj\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.494099 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:00 crc kubenswrapper[4647]: I1128 16:15:00.978762 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj"] Nov 28 16:15:01 crc kubenswrapper[4647]: I1128 16:15:01.093817 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" event={"ID":"5f968e9d-0276-495e-abc8-88f232ea1344","Type":"ContainerStarted","Data":"e201208ad86388535bd80a4209d7057f9d0d93ed2341c77abb6f7557f8575497"} Nov 28 16:15:01 crc kubenswrapper[4647]: E1128 16:15:01.835004 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f968e9d_0276_495e_abc8_88f232ea1344.slice/crio-conmon-fa992f2eb32b9a62bdb02773c52b1f32a9f1203889ba282cb9c3d245c78a0230.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:15:02 crc kubenswrapper[4647]: I1128 16:15:02.105838 4647 generic.go:334] "Generic (PLEG): container finished" podID="5f968e9d-0276-495e-abc8-88f232ea1344" containerID="fa992f2eb32b9a62bdb02773c52b1f32a9f1203889ba282cb9c3d245c78a0230" exitCode=0 Nov 28 16:15:02 crc kubenswrapper[4647]: I1128 16:15:02.105952 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" event={"ID":"5f968e9d-0276-495e-abc8-88f232ea1344","Type":"ContainerDied","Data":"fa992f2eb32b9a62bdb02773c52b1f32a9f1203889ba282cb9c3d245c78a0230"} Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.478583 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.589894 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume\") pod \"5f968e9d-0276-495e-abc8-88f232ea1344\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.590096 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume\") pod \"5f968e9d-0276-495e-abc8-88f232ea1344\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.591266 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume" (OuterVolumeSpecName: "config-volume") pod "5f968e9d-0276-495e-abc8-88f232ea1344" (UID: "5f968e9d-0276-495e-abc8-88f232ea1344"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.591371 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8vm9\" (UniqueName: \"kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9\") pod \"5f968e9d-0276-495e-abc8-88f232ea1344\" (UID: \"5f968e9d-0276-495e-abc8-88f232ea1344\") " Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.592811 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f968e9d-0276-495e-abc8-88f232ea1344-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.612339 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9" (OuterVolumeSpecName: "kube-api-access-t8vm9") pod "5f968e9d-0276-495e-abc8-88f232ea1344" (UID: "5f968e9d-0276-495e-abc8-88f232ea1344"). InnerVolumeSpecName "kube-api-access-t8vm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.615700 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5f968e9d-0276-495e-abc8-88f232ea1344" (UID: "5f968e9d-0276-495e-abc8-88f232ea1344"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.695018 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f968e9d-0276-495e-abc8-88f232ea1344-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:03 crc kubenswrapper[4647]: I1128 16:15:03.695470 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8vm9\" (UniqueName: \"kubernetes.io/projected/5f968e9d-0276-495e-abc8-88f232ea1344-kube-api-access-t8vm9\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:04 crc kubenswrapper[4647]: I1128 16:15:04.138748 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" event={"ID":"5f968e9d-0276-495e-abc8-88f232ea1344","Type":"ContainerDied","Data":"e201208ad86388535bd80a4209d7057f9d0d93ed2341c77abb6f7557f8575497"} Nov 28 16:15:04 crc kubenswrapper[4647]: I1128 16:15:04.138805 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e201208ad86388535bd80a4209d7057f9d0d93ed2341c77abb6f7557f8575497" Nov 28 16:15:04 crc kubenswrapper[4647]: I1128 16:15:04.138886 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj" Nov 28 16:15:04 crc kubenswrapper[4647]: I1128 16:15:04.581349 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9"] Nov 28 16:15:04 crc kubenswrapper[4647]: I1128 16:15:04.592088 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-m64h9"] Nov 28 16:15:06 crc kubenswrapper[4647]: I1128 16:15:06.415839 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb" path="/var/lib/kubelet/pods/7f0b3e80-18fc-4d02-92fd-bfc7ceaa97bb/volumes" Nov 28 16:15:11 crc kubenswrapper[4647]: I1128 16:15:11.395184 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:15:11 crc kubenswrapper[4647]: E1128 16:15:11.395677 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:15:13 crc kubenswrapper[4647]: I1128 16:15:13.227473 4647 generic.go:334] "Generic (PLEG): container finished" podID="c1d8e071-fad7-4b8d-8637-e7be304c4c86" containerID="891cead96ba6492d3a98c5cbd64b4dec47f867b3c2078a87bf8e1901230c0588" exitCode=0 Nov 28 16:15:13 crc kubenswrapper[4647]: I1128 16:15:13.227571 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" event={"ID":"c1d8e071-fad7-4b8d-8637-e7be304c4c86","Type":"ContainerDied","Data":"891cead96ba6492d3a98c5cbd64b4dec47f867b3c2078a87bf8e1901230c0588"} Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.752143 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755025 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755093 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56b6v\" (UniqueName: \"kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755146 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755192 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755267 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755307 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.755334 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key\") pod \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\" (UID: \"c1d8e071-fad7-4b8d-8637-e7be304c4c86\") " Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.762170 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v" (OuterVolumeSpecName: "kube-api-access-56b6v") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "kube-api-access-56b6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.767803 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.799731 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.800976 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.804827 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.808184 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory" (OuterVolumeSpecName: "inventory") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.811880 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "c1d8e071-fad7-4b8d-8637-e7be304c4c86" (UID: "c1d8e071-fad7-4b8d-8637-e7be304c4c86"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860182 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56b6v\" (UniqueName: \"kubernetes.io/projected/c1d8e071-fad7-4b8d-8637-e7be304c4c86-kube-api-access-56b6v\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860625 4647 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860694 4647 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860765 4647 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860846 4647 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.860929 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:14 crc kubenswrapper[4647]: I1128 16:15:14.861674 4647 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c1d8e071-fad7-4b8d-8637-e7be304c4c86-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:15 crc kubenswrapper[4647]: I1128 16:15:15.256559 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" event={"ID":"c1d8e071-fad7-4b8d-8637-e7be304c4c86","Type":"ContainerDied","Data":"038d8ed93a51e17066572494f08f17cff63cdd4d0e2ccafa192916eef4cf5afa"} Nov 28 16:15:15 crc kubenswrapper[4647]: I1128 16:15:15.256615 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="038d8ed93a51e17066572494f08f17cff63cdd4d0e2ccafa192916eef4cf5afa" Nov 28 16:15:15 crc kubenswrapper[4647]: I1128 16:15:15.256680 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wghtm" Nov 28 16:15:18 crc kubenswrapper[4647]: I1128 16:15:18.616737 4647 scope.go:117] "RemoveContainer" containerID="e0eae7dde42b8513288dfd8ae43953fc04795f89bf2dd645321a9c387ddf22dc" Nov 28 16:15:25 crc kubenswrapper[4647]: I1128 16:15:25.395532 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:15:25 crc kubenswrapper[4647]: E1128 16:15:25.396551 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:15:39 crc kubenswrapper[4647]: I1128 16:15:39.395252 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:15:39 crc kubenswrapper[4647]: E1128 16:15:39.397374 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:15:53 crc kubenswrapper[4647]: I1128 16:15:53.395376 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:15:53 crc kubenswrapper[4647]: E1128 16:15:53.396497 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:04 crc kubenswrapper[4647]: I1128 16:16:04.396047 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:16:04 crc kubenswrapper[4647]: E1128 16:16:04.396952 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:16 crc kubenswrapper[4647]: I1128 16:16:16.394550 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:16:16 crc kubenswrapper[4647]: E1128 16:16:16.395338 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.399894 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 16:16:17 crc kubenswrapper[4647]: E1128 16:16:17.400579 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f968e9d-0276-495e-abc8-88f232ea1344" containerName="collect-profiles" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.400607 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f968e9d-0276-495e-abc8-88f232ea1344" containerName="collect-profiles" Nov 28 16:16:17 crc kubenswrapper[4647]: E1128 16:16:17.400627 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1d8e071-fad7-4b8d-8637-e7be304c4c86" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.400638 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1d8e071-fad7-4b8d-8637-e7be304c4c86" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.400920 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f968e9d-0276-495e-abc8-88f232ea1344" containerName="collect-profiles" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.400940 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1d8e071-fad7-4b8d-8637-e7be304c4c86" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.401961 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.407951 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.408496 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.408285 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.408751 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2kkz8" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.413361 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545352 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545477 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545522 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545540 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545603 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545676 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545697 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6c9x\" (UniqueName: \"kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545719 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.545764 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648091 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648233 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648298 4647 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6c9x\" (UniqueName: \"kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648331 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648398 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648516 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648612 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.649718 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.648827 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.649789 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.649810 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.650505 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: 
\"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.650931 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.651117 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.656305 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.658181 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.667354 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.675444 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6c9x\" (UniqueName: \"kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.695724 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " pod="openstack/tempest-tests-tempest" Nov 28 16:16:17 crc kubenswrapper[4647]: I1128 16:16:17.738474 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 16:16:18 crc kubenswrapper[4647]: I1128 16:16:18.225051 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 16:16:18 crc kubenswrapper[4647]: I1128 16:16:18.233253 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:16:19 crc kubenswrapper[4647]: I1128 16:16:18.999671 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"5f2af68b-575d-469b-ab8d-7f16dfadc0d7","Type":"ContainerStarted","Data":"22e3ec1363b67b592fa2e6f2b9f7fa12338782a2b966b8cc6d7e33a058b5692b"} Nov 28 16:16:27 crc kubenswrapper[4647]: I1128 16:16:27.394323 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:16:27 crc kubenswrapper[4647]: E1128 16:16:27.395455 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:41 crc kubenswrapper[4647]: I1128 16:16:41.396018 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:16:41 crc kubenswrapper[4647]: E1128 16:16:41.397015 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:43 crc kubenswrapper[4647]: I1128 16:16:43.690591 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-vnbr8" podUID="dc709627-0843-4e8f-8485-5ac40ec5b457" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:16:56 crc kubenswrapper[4647]: I1128 16:16:56.394753 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:16:56 crc kubenswrapper[4647]: E1128 16:16:56.395469 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:16:56 crc kubenswrapper[4647]: E1128 16:16:56.543141 4647 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 28 16:16:56 crc kubenswrapper[4647]: E1128 16:16:56.545996 4647 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r6c9x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(5f2af68b-575d-469b-ab8d-7f16dfadc0d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:16:56 crc kubenswrapper[4647]: E1128 16:16:56.548699 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" Nov 28 16:16:57 crc kubenswrapper[4647]: E1128 16:16:57.459558 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" Nov 28 16:17:09 crc kubenswrapper[4647]: I1128 16:17:09.028194 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 28 16:17:09 crc kubenswrapper[4647]: I1128 16:17:09.394964 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:17:09 crc kubenswrapper[4647]: E1128 16:17:09.396003 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:17:10 crc kubenswrapper[4647]: I1128 16:17:10.588612 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"5f2af68b-575d-469b-ab8d-7f16dfadc0d7","Type":"ContainerStarted","Data":"9a8bd37e5bda990b139a6b2ef4d7d5a18bb0a95c41b34613899a5e35a2f3a822"} Nov 28 16:17:10 crc kubenswrapper[4647]: I1128 16:17:10.619194 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.826520017 podStartE2EDuration="54.619174394s" podCreationTimestamp="2025-11-28 16:16:16 +0000 UTC" firstStartedPulling="2025-11-28 16:16:18.232934741 +0000 UTC m=+3108.080541172" lastFinishedPulling="2025-11-28 16:17:09.025589118 +0000 UTC m=+3158.873195549" observedRunningTime="2025-11-28 16:17:10.606376384 +0000 UTC m=+3160.453982815" watchObservedRunningTime="2025-11-28 16:17:10.619174394 +0000 UTC m=+3160.466780825" Nov 28 16:17:23 crc kubenswrapper[4647]: I1128 16:17:23.395029 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:17:23 crc kubenswrapper[4647]: I1128 16:17:23.776344 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023"} Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.465466 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.469376 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.497970 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.563255 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.563337 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.563392 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45dsg\" (UniqueName: \"kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.678830 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.678930 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.678985 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45dsg\" (UniqueName: \"kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.680188 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.680560 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.710129 4647 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-45dsg\" (UniqueName: \"kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg\") pod \"redhat-marketplace-4kth9\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:46 crc kubenswrapper[4647]: I1128 16:19:46.797572 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:47 crc kubenswrapper[4647]: I1128 16:19:47.022882 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:19:47 crc kubenswrapper[4647]: I1128 16:19:47.023249 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:19:49 crc kubenswrapper[4647]: I1128 16:19:49.615155 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:19:50 crc kubenswrapper[4647]: I1128 16:19:50.493928 4647 generic.go:334] "Generic (PLEG): container finished" podID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerID="6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790" exitCode=0 Nov 28 16:19:50 crc kubenswrapper[4647]: I1128 16:19:50.494068 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerDied","Data":"6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790"} Nov 28 16:19:50 crc kubenswrapper[4647]: I1128 16:19:50.494346 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerStarted","Data":"94ac8386a624f8ecad2753d355eab35b88134209c84697cc956ec28e7a712044"} Nov 28 16:19:51 crc kubenswrapper[4647]: I1128 16:19:51.509273 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerStarted","Data":"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74"} Nov 28 16:19:52 crc kubenswrapper[4647]: I1128 16:19:52.551318 4647 generic.go:334] "Generic (PLEG): container finished" podID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerID="09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74" exitCode=0 Nov 28 16:19:52 crc kubenswrapper[4647]: I1128 16:19:52.551677 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerDied","Data":"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74"} Nov 28 16:19:54 crc kubenswrapper[4647]: I1128 16:19:54.570003 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerStarted","Data":"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5"} Nov 28 16:19:54 crc 
kubenswrapper[4647]: I1128 16:19:54.606348 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4kth9" podStartSLOduration=5.967325708 podStartE2EDuration="8.606319528s" podCreationTimestamp="2025-11-28 16:19:46 +0000 UTC" firstStartedPulling="2025-11-28 16:19:50.496514429 +0000 UTC m=+3320.344120850" lastFinishedPulling="2025-11-28 16:19:53.135508229 +0000 UTC m=+3322.983114670" observedRunningTime="2025-11-28 16:19:54.601369887 +0000 UTC m=+3324.448976338" watchObservedRunningTime="2025-11-28 16:19:54.606319528 +0000 UTC m=+3324.453925979" Nov 28 16:19:56 crc kubenswrapper[4647]: I1128 16:19:56.798792 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:56 crc kubenswrapper[4647]: I1128 16:19:56.798848 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:19:56 crc kubenswrapper[4647]: I1128 16:19:56.852305 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:20:06 crc kubenswrapper[4647]: I1128 16:20:06.855558 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:20:06 crc kubenswrapper[4647]: I1128 16:20:06.912740 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:20:07 crc kubenswrapper[4647]: I1128 16:20:07.698881 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4kth9" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="registry-server" containerID="cri-o://3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5" gracePeriod=2 Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.451713 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.539163 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45dsg\" (UniqueName: \"kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg\") pod \"0f823e00-a88b-4c55-bfbb-95156ee15e68\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.539344 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content\") pod \"0f823e00-a88b-4c55-bfbb-95156ee15e68\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.539519 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities\") pod \"0f823e00-a88b-4c55-bfbb-95156ee15e68\" (UID: \"0f823e00-a88b-4c55-bfbb-95156ee15e68\") " Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.541152 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities" (OuterVolumeSpecName: "utilities") pod "0f823e00-a88b-4c55-bfbb-95156ee15e68" (UID: "0f823e00-a88b-4c55-bfbb-95156ee15e68"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.548306 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg" (OuterVolumeSpecName: "kube-api-access-45dsg") pod "0f823e00-a88b-4c55-bfbb-95156ee15e68" (UID: "0f823e00-a88b-4c55-bfbb-95156ee15e68"). InnerVolumeSpecName "kube-api-access-45dsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.557078 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0f823e00-a88b-4c55-bfbb-95156ee15e68" (UID: "0f823e00-a88b-4c55-bfbb-95156ee15e68"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.643658 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45dsg\" (UniqueName: \"kubernetes.io/projected/0f823e00-a88b-4c55-bfbb-95156ee15e68-kube-api-access-45dsg\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.643691 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.643700 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f823e00-a88b-4c55-bfbb-95156ee15e68-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.716377 4647 generic.go:334] "Generic (PLEG): container finished" podID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerID="3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5" exitCode=0 Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.716466 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerDied","Data":"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5"} Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.716501 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kth9" event={"ID":"0f823e00-a88b-4c55-bfbb-95156ee15e68","Type":"ContainerDied","Data":"94ac8386a624f8ecad2753d355eab35b88134209c84697cc956ec28e7a712044"} Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.716523 4647 scope.go:117] "RemoveContainer" containerID="3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.716710 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kth9" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.755895 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.763821 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kth9"] Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.764032 4647 scope.go:117] "RemoveContainer" containerID="09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.787200 4647 scope.go:117] "RemoveContainer" containerID="6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.863649 4647 scope.go:117] "RemoveContainer" containerID="3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5" Nov 28 16:20:08 crc kubenswrapper[4647]: E1128 16:20:08.865340 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5\": container with ID starting with 3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5 not found: ID does not exist" containerID="3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.865393 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5"} err="failed to get container status \"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5\": rpc error: code = NotFound desc = could not find container \"3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5\": container with ID starting with 3a987e06ac98f44ebf95abc474cf45c01e12ac96c70b58708ed406cf56c38ee5 not found: ID does not exist" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.865525 4647 scope.go:117] "RemoveContainer" containerID="09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74" Nov 28 16:20:08 crc kubenswrapper[4647]: E1128 16:20:08.866051 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74\": container with ID starting with 09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74 not found: ID does not exist" containerID="09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.866081 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74"} err="failed to get container status \"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74\": rpc error: code = NotFound desc = could not find container \"09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74\": container with ID starting with 09871fd30010d526b28b813da83caed264fb49fd606f1f29916752ce29dc9b74 not found: ID does not exist" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.866100 4647 scope.go:117] "RemoveContainer" containerID="6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790" Nov 28 16:20:08 crc kubenswrapper[4647]: E1128 16:20:08.866506 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790\": container with ID starting with 6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790 not found: ID does not exist" containerID="6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790" Nov 28 16:20:08 crc kubenswrapper[4647]: I1128 16:20:08.866537 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790"} err="failed to get container status \"6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790\": rpc error: code = NotFound desc = could not find container \"6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790\": container with ID starting with 6e2dc9ca6b2e7d0b6ccc27f800883645cc7e95551b39b81825bbe818de198790 not found: ID does not exist" Nov 28 16:20:10 crc kubenswrapper[4647]: I1128 16:20:10.407402 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" path="/var/lib/kubelet/pods/0f823e00-a88b-4c55-bfbb-95156ee15e68/volumes" Nov 28 16:20:17 crc kubenswrapper[4647]: I1128 16:20:17.022569 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:20:17 crc kubenswrapper[4647]: I1128 16:20:17.023197 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.708533 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:28 crc kubenswrapper[4647]: E1128 16:20:28.710275 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="extract-content" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.710301 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="extract-content" Nov 28 16:20:28 crc kubenswrapper[4647]: E1128 16:20:28.710346 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="registry-server" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.710355 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="registry-server" Nov 28 16:20:28 crc kubenswrapper[4647]: E1128 16:20:28.710380 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="extract-utilities" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.710388 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="extract-utilities" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.710703 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f823e00-a88b-4c55-bfbb-95156ee15e68" containerName="registry-server" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 
16:20:28.713442 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.720813 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.824509 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.824947 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls7gf\" (UniqueName: \"kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.825018 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.927261 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls7gf\" (UniqueName: \"kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.927443 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.927542 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.927915 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc kubenswrapper[4647]: I1128 16:20:28.928148 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:28 crc 
kubenswrapper[4647]: I1128 16:20:28.953995 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls7gf\" (UniqueName: \"kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf\") pod \"certified-operators-h5xbw\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:29 crc kubenswrapper[4647]: I1128 16:20:29.043695 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:29 crc kubenswrapper[4647]: I1128 16:20:29.567283 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:29 crc kubenswrapper[4647]: I1128 16:20:29.914204 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerStarted","Data":"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b"} Nov 28 16:20:29 crc kubenswrapper[4647]: I1128 16:20:29.914564 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerStarted","Data":"1c8a8b921ea34910c3f53e591b7daa7ec6d9e6fe9f209f5720c6a3c5f35801d9"} Nov 28 16:20:30 crc kubenswrapper[4647]: I1128 16:20:30.946312 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerID="374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b" exitCode=0 Nov 28 16:20:30 crc kubenswrapper[4647]: I1128 16:20:30.947646 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerDied","Data":"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b"} Nov 28 16:20:33 crc kubenswrapper[4647]: I1128 16:20:33.977236 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerStarted","Data":"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945"} Nov 28 16:20:34 crc kubenswrapper[4647]: I1128 16:20:34.990866 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerID="7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945" exitCode=0 Nov 28 16:20:34 crc kubenswrapper[4647]: I1128 16:20:34.990919 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerDied","Data":"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945"} Nov 28 16:20:37 crc kubenswrapper[4647]: I1128 16:20:37.012715 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerStarted","Data":"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f"} Nov 28 16:20:37 crc kubenswrapper[4647]: I1128 16:20:37.035993 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h5xbw" podStartSLOduration=4.414014542 podStartE2EDuration="9.035950895s" podCreationTimestamp="2025-11-28 16:20:28 +0000 UTC" firstStartedPulling="2025-11-28 
16:20:30.949781647 +0000 UTC m=+3360.797388058" lastFinishedPulling="2025-11-28 16:20:35.57171799 +0000 UTC m=+3365.419324411" observedRunningTime="2025-11-28 16:20:37.034911517 +0000 UTC m=+3366.882517938" watchObservedRunningTime="2025-11-28 16:20:37.035950895 +0000 UTC m=+3366.883557316" Nov 28 16:20:39 crc kubenswrapper[4647]: I1128 16:20:39.043879 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:39 crc kubenswrapper[4647]: I1128 16:20:39.044358 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:39 crc kubenswrapper[4647]: I1128 16:20:39.100395 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:47 crc kubenswrapper[4647]: I1128 16:20:47.023248 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:20:47 crc kubenswrapper[4647]: I1128 16:20:47.023998 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:20:47 crc kubenswrapper[4647]: I1128 16:20:47.024046 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:20:47 crc kubenswrapper[4647]: I1128 16:20:47.024875 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:20:47 crc kubenswrapper[4647]: I1128 16:20:47.024938 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023" gracePeriod=600 Nov 28 16:20:48 crc kubenswrapper[4647]: I1128 16:20:48.143908 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023" exitCode=0 Nov 28 16:20:48 crc kubenswrapper[4647]: I1128 16:20:48.144764 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023"} Nov 28 16:20:48 crc kubenswrapper[4647]: I1128 16:20:48.144998 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" 
event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43"} Nov 28 16:20:48 crc kubenswrapper[4647]: I1128 16:20:48.145049 4647 scope.go:117] "RemoveContainer" containerID="3c37886ef796eaf37306af817b49b73ffb8329caac2792eea85aeda39b8279f4" Nov 28 16:20:49 crc kubenswrapper[4647]: I1128 16:20:49.107250 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:49 crc kubenswrapper[4647]: I1128 16:20:49.187405 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:49 crc kubenswrapper[4647]: I1128 16:20:49.187752 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h5xbw" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="registry-server" containerID="cri-o://977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f" gracePeriod=2 Nov 28 16:20:49 crc kubenswrapper[4647]: I1128 16:20:49.985692 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.049230 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities\") pod \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.049640 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls7gf\" (UniqueName: \"kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf\") pod \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.049803 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content\") pod \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\" (UID: \"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0\") " Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.059022 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities" (OuterVolumeSpecName: "utilities") pod "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" (UID: "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.076749 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf" (OuterVolumeSpecName: "kube-api-access-ls7gf") pod "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" (UID: "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0"). InnerVolumeSpecName "kube-api-access-ls7gf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.148818 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" (UID: "f4e2cc35-2a33-4c55-9b1d-8330a34be7d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.153796 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.153924 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls7gf\" (UniqueName: \"kubernetes.io/projected/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-kube-api-access-ls7gf\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.153942 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.180528 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerID="977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f" exitCode=0 Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.180581 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerDied","Data":"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f"} Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.180627 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h5xbw" event={"ID":"f4e2cc35-2a33-4c55-9b1d-8330a34be7d0","Type":"ContainerDied","Data":"1c8a8b921ea34910c3f53e591b7daa7ec6d9e6fe9f209f5720c6a3c5f35801d9"} Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.180651 4647 scope.go:117] "RemoveContainer" containerID="977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.180696 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h5xbw" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.213030 4647 scope.go:117] "RemoveContainer" containerID="7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.225719 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.234831 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h5xbw"] Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.245878 4647 scope.go:117] "RemoveContainer" containerID="374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.281050 4647 scope.go:117] "RemoveContainer" containerID="977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f" Nov 28 16:20:50 crc kubenswrapper[4647]: E1128 16:20:50.281474 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f\": container with ID starting with 977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f not found: ID does not exist" containerID="977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.281546 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f"} err="failed to get container status \"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f\": rpc error: code = NotFound desc = could not find container \"977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f\": container with ID starting with 977321af5086d21cbaf906f4e74cfc22026333e29b27239f7bc7b5d79f00a53f not found: ID does not exist" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.281578 4647 scope.go:117] "RemoveContainer" containerID="7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945" Nov 28 16:20:50 crc kubenswrapper[4647]: E1128 16:20:50.282529 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945\": container with ID starting with 7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945 not found: ID does not exist" containerID="7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.282562 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945"} err="failed to get container status \"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945\": rpc error: code = NotFound desc = could not find container \"7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945\": container with ID starting with 7c2541602b29fe48b4ff05b1ef5d3d74b7af33286c26a1ee154b2d48ae30e945 not found: ID does not exist" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.282579 4647 scope.go:117] "RemoveContainer" containerID="374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b" Nov 28 16:20:50 crc kubenswrapper[4647]: E1128 16:20:50.284115 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b\": container with ID starting with 374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b not found: ID does not exist" containerID="374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.284140 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b"} err="failed to get container status \"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b\": rpc error: code = NotFound desc = could not find container \"374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b\": container with ID starting with 374e37ec8b48c0fe53a43866016cf7a8e7d02b22e3a6c2259cee85841520ae0b not found: ID does not exist" Nov 28 16:20:50 crc kubenswrapper[4647]: I1128 16:20:50.405271 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" path="/var/lib/kubelet/pods/f4e2cc35-2a33-4c55-9b1d-8330a34be7d0/volumes" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.298861 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:35 crc kubenswrapper[4647]: E1128 16:21:35.303006 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="extract-content" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.303251 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="extract-content" Nov 28 16:21:35 crc kubenswrapper[4647]: E1128 16:21:35.303441 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="registry-server" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.303581 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="registry-server" Nov 28 16:21:35 crc kubenswrapper[4647]: E1128 16:21:35.303719 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="extract-utilities" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.303849 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="extract-utilities" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.304339 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e2cc35-2a33-4c55-9b1d-8330a34be7d0" containerName="registry-server" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.310211 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.320143 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.490666 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.490837 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.490909 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd6r8\" (UniqueName: \"kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.593030 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.593162 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd6r8\" (UniqueName: \"kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.593274 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.593558 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.594196 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.626384 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gd6r8\" (UniqueName: \"kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8\") pod \"community-operators-h96ml\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:35 crc kubenswrapper[4647]: I1128 16:21:35.638871 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:36 crc kubenswrapper[4647]: I1128 16:21:36.169870 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:36 crc kubenswrapper[4647]: I1128 16:21:36.649238 4647 generic.go:334] "Generic (PLEG): container finished" podID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerID="7c478eada422722aded3ebf8596790c7768a4e92dfd20c14c1df3a6433771ca3" exitCode=0 Nov 28 16:21:36 crc kubenswrapper[4647]: I1128 16:21:36.649462 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerDied","Data":"7c478eada422722aded3ebf8596790c7768a4e92dfd20c14c1df3a6433771ca3"} Nov 28 16:21:36 crc kubenswrapper[4647]: I1128 16:21:36.649805 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerStarted","Data":"b3a79b83859363bd0a6840874c5c474be5dcd80145f61f24e63649a07d208b75"} Nov 28 16:21:36 crc kubenswrapper[4647]: I1128 16:21:36.651958 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:21:38 crc kubenswrapper[4647]: I1128 16:21:38.687855 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerStarted","Data":"ea3553b2973263af59565ad09f771a7a71857c2e14d522d4c1ef5ab53465968d"} Nov 28 16:21:39 crc kubenswrapper[4647]: I1128 16:21:39.701283 4647 generic.go:334] "Generic (PLEG): container finished" podID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerID="ea3553b2973263af59565ad09f771a7a71857c2e14d522d4c1ef5ab53465968d" exitCode=0 Nov 28 16:21:39 crc kubenswrapper[4647]: I1128 16:21:39.701350 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerDied","Data":"ea3553b2973263af59565ad09f771a7a71857c2e14d522d4c1ef5ab53465968d"} Nov 28 16:21:41 crc kubenswrapper[4647]: I1128 16:21:41.727921 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerStarted","Data":"8dfe72a9c9be3dedd1563146039e3486f238041f4c2629ab0ae5d59d414181fd"} Nov 28 16:21:41 crc kubenswrapper[4647]: I1128 16:21:41.761138 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h96ml" podStartSLOduration=2.104267252 podStartE2EDuration="6.761119831s" podCreationTimestamp="2025-11-28 16:21:35 +0000 UTC" firstStartedPulling="2025-11-28 16:21:36.651310199 +0000 UTC m=+3426.498916620" lastFinishedPulling="2025-11-28 16:21:41.308162778 +0000 UTC m=+3431.155769199" observedRunningTime="2025-11-28 16:21:41.760367501 +0000 UTC m=+3431.607973932" watchObservedRunningTime="2025-11-28 
16:21:41.761119831 +0000 UTC m=+3431.608726252" Nov 28 16:21:45 crc kubenswrapper[4647]: I1128 16:21:45.640043 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:45 crc kubenswrapper[4647]: I1128 16:21:45.641074 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:46 crc kubenswrapper[4647]: I1128 16:21:46.706969 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-h96ml" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="registry-server" probeResult="failure" output=< Nov 28 16:21:46 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:21:46 crc kubenswrapper[4647]: > Nov 28 16:21:55 crc kubenswrapper[4647]: I1128 16:21:55.705481 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:55 crc kubenswrapper[4647]: I1128 16:21:55.763779 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:55 crc kubenswrapper[4647]: I1128 16:21:55.944741 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:56 crc kubenswrapper[4647]: I1128 16:21:56.884187 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h96ml" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="registry-server" containerID="cri-o://8dfe72a9c9be3dedd1563146039e3486f238041f4c2629ab0ae5d59d414181fd" gracePeriod=2 Nov 28 16:21:57 crc kubenswrapper[4647]: I1128 16:21:57.902254 4647 generic.go:334] "Generic (PLEG): container finished" podID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerID="8dfe72a9c9be3dedd1563146039e3486f238041f4c2629ab0ae5d59d414181fd" exitCode=0 Nov 28 16:21:57 crc kubenswrapper[4647]: I1128 16:21:57.903524 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerDied","Data":"8dfe72a9c9be3dedd1563146039e3486f238041f4c2629ab0ae5d59d414181fd"} Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.453560 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.531506 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd6r8\" (UniqueName: \"kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8\") pod \"af10f7a6-4842-4715-a08b-73fb1c94b5da\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.531887 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content\") pod \"af10f7a6-4842-4715-a08b-73fb1c94b5da\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.531986 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities\") pod \"af10f7a6-4842-4715-a08b-73fb1c94b5da\" (UID: \"af10f7a6-4842-4715-a08b-73fb1c94b5da\") " Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.534007 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities" (OuterVolumeSpecName: "utilities") pod "af10f7a6-4842-4715-a08b-73fb1c94b5da" (UID: "af10f7a6-4842-4715-a08b-73fb1c94b5da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.547833 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8" (OuterVolumeSpecName: "kube-api-access-gd6r8") pod "af10f7a6-4842-4715-a08b-73fb1c94b5da" (UID: "af10f7a6-4842-4715-a08b-73fb1c94b5da"). InnerVolumeSpecName "kube-api-access-gd6r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.627937 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af10f7a6-4842-4715-a08b-73fb1c94b5da" (UID: "af10f7a6-4842-4715-a08b-73fb1c94b5da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.637877 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.638263 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af10f7a6-4842-4715-a08b-73fb1c94b5da-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.638329 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd6r8\" (UniqueName: \"kubernetes.io/projected/af10f7a6-4842-4715-a08b-73fb1c94b5da-kube-api-access-gd6r8\") on node \"crc\" DevicePath \"\"" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.915512 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h96ml" event={"ID":"af10f7a6-4842-4715-a08b-73fb1c94b5da","Type":"ContainerDied","Data":"b3a79b83859363bd0a6840874c5c474be5dcd80145f61f24e63649a07d208b75"} Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.915583 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h96ml" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.915593 4647 scope.go:117] "RemoveContainer" containerID="8dfe72a9c9be3dedd1563146039e3486f238041f4c2629ab0ae5d59d414181fd" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.959556 4647 scope.go:117] "RemoveContainer" containerID="ea3553b2973263af59565ad09f771a7a71857c2e14d522d4c1ef5ab53465968d" Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.969081 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.980039 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h96ml"] Nov 28 16:21:58 crc kubenswrapper[4647]: I1128 16:21:58.988845 4647 scope.go:117] "RemoveContainer" containerID="7c478eada422722aded3ebf8596790c7768a4e92dfd20c14c1df3a6433771ca3" Nov 28 16:22:00 crc kubenswrapper[4647]: I1128 16:22:00.408927 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" path="/var/lib/kubelet/pods/af10f7a6-4842-4715-a08b-73fb1c94b5da/volumes" Nov 28 16:22:47 crc kubenswrapper[4647]: I1128 16:22:47.022570 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:22:47 crc kubenswrapper[4647]: I1128 16:22:47.023190 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:17 crc kubenswrapper[4647]: I1128 16:23:17.022307 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:17 crc kubenswrapper[4647]: I1128 16:23:17.023199 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:47 crc kubenswrapper[4647]: I1128 16:23:47.023116 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:47 crc kubenswrapper[4647]: I1128 16:23:47.023693 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:47 crc kubenswrapper[4647]: I1128 16:23:47.023738 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:23:47 crc kubenswrapper[4647]: I1128 16:23:47.024468 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:23:47 crc kubenswrapper[4647]: I1128 16:23:47.024512 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" gracePeriod=600 Nov 28 16:23:47 crc kubenswrapper[4647]: E1128 16:23:47.328396 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:23:48 crc kubenswrapper[4647]: I1128 16:23:48.043052 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" exitCode=0 Nov 28 16:23:48 crc kubenswrapper[4647]: I1128 16:23:48.043095 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43"} Nov 28 16:23:48 crc kubenswrapper[4647]: I1128 16:23:48.043149 4647 scope.go:117] "RemoveContainer" containerID="832c91673ae6e5cccd5d6e47dce862dbb9a90f3dcb052b3b01eca5f8e77b3023" Nov 28 16:23:48 crc kubenswrapper[4647]: I1128 
16:23:48.043904 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:23:48 crc kubenswrapper[4647]: E1128 16:23:48.044224 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:24:02 crc kubenswrapper[4647]: I1128 16:24:02.394337 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:24:02 crc kubenswrapper[4647]: E1128 16:24:02.396281 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:24:16 crc kubenswrapper[4647]: I1128 16:24:16.394234 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:24:16 crc kubenswrapper[4647]: E1128 16:24:16.394936 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:24:29 crc kubenswrapper[4647]: I1128 16:24:29.394370 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:24:29 crc kubenswrapper[4647]: E1128 16:24:29.395263 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:24:44 crc kubenswrapper[4647]: I1128 16:24:44.395244 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:24:44 crc kubenswrapper[4647]: E1128 16:24:44.396136 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:24:57 crc kubenswrapper[4647]: I1128 16:24:57.395061 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:24:57 crc kubenswrapper[4647]: E1128 16:24:57.395941 
4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:25:09 crc kubenswrapper[4647]: I1128 16:25:09.395193 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:25:09 crc kubenswrapper[4647]: E1128 16:25:09.396360 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:25:22 crc kubenswrapper[4647]: I1128 16:25:22.395493 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:25:22 crc kubenswrapper[4647]: E1128 16:25:22.396533 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:25:34 crc kubenswrapper[4647]: I1128 16:25:34.398118 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:25:34 crc kubenswrapper[4647]: E1128 16:25:34.399063 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:25:46 crc kubenswrapper[4647]: I1128 16:25:46.395939 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:25:46 crc kubenswrapper[4647]: E1128 16:25:46.397869 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:25:57 crc kubenswrapper[4647]: I1128 16:25:57.393868 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:25:57 crc kubenswrapper[4647]: E1128 16:25:57.394698 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:26:12 crc kubenswrapper[4647]: I1128 16:26:12.394011 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:26:12 crc kubenswrapper[4647]: E1128 16:26:12.394799 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.546845 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:16 crc kubenswrapper[4647]: E1128 16:26:16.547535 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="registry-server" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.547546 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="registry-server" Nov 28 16:26:16 crc kubenswrapper[4647]: E1128 16:26:16.547572 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="extract-content" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.547578 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="extract-content" Nov 28 16:26:16 crc kubenswrapper[4647]: E1128 16:26:16.547600 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="extract-utilities" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.547607 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="extract-utilities" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.547776 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="af10f7a6-4842-4715-a08b-73fb1c94b5da" containerName="registry-server" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.549278 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.560531 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.562636 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.562772 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.562879 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx42k\" (UniqueName: \"kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.665053 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.665677 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.665681 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.665816 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx42k\" (UniqueName: \"kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.666252 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.696629 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qx42k\" (UniqueName: \"kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k\") pod \"redhat-operators-rmb87\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:16 crc kubenswrapper[4647]: I1128 16:26:16.882555 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:17 crc kubenswrapper[4647]: I1128 16:26:17.396624 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:17 crc kubenswrapper[4647]: I1128 16:26:17.509988 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerStarted","Data":"255de43bfad6c7ea2aabe7c09ec890337ed7e54643a564aa3f32d334905ffb26"} Nov 28 16:26:18 crc kubenswrapper[4647]: I1128 16:26:18.521154 4647 generic.go:334] "Generic (PLEG): container finished" podID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerID="bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43" exitCode=0 Nov 28 16:26:18 crc kubenswrapper[4647]: I1128 16:26:18.521198 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerDied","Data":"bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43"} Nov 28 16:26:19 crc kubenswrapper[4647]: I1128 16:26:19.549787 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerStarted","Data":"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e"} Nov 28 16:26:24 crc kubenswrapper[4647]: I1128 16:26:24.142927 4647 generic.go:334] "Generic (PLEG): container finished" podID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerID="dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e" exitCode=0 Nov 28 16:26:24 crc kubenswrapper[4647]: I1128 16:26:24.143440 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerDied","Data":"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e"} Nov 28 16:26:25 crc kubenswrapper[4647]: I1128 16:26:25.155523 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerStarted","Data":"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477"} Nov 28 16:26:25 crc kubenswrapper[4647]: I1128 16:26:25.183497 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rmb87" podStartSLOduration=2.89370961 podStartE2EDuration="9.18328693s" podCreationTimestamp="2025-11-28 16:26:16 +0000 UTC" firstStartedPulling="2025-11-28 16:26:18.524008648 +0000 UTC m=+3708.371615069" lastFinishedPulling="2025-11-28 16:26:24.813585968 +0000 UTC m=+3714.661192389" observedRunningTime="2025-11-28 16:26:25.175520864 +0000 UTC m=+3715.023127305" watchObservedRunningTime="2025-11-28 16:26:25.18328693 +0000 UTC m=+3715.030893351" Nov 28 16:26:25 crc kubenswrapper[4647]: I1128 16:26:25.395153 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:26:25 
crc kubenswrapper[4647]: E1128 16:26:25.395838 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:26:26 crc kubenswrapper[4647]: I1128 16:26:26.883488 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:26 crc kubenswrapper[4647]: I1128 16:26:26.883794 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:27 crc kubenswrapper[4647]: I1128 16:26:27.937979 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rmb87" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="registry-server" probeResult="failure" output=< Nov 28 16:26:27 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:26:27 crc kubenswrapper[4647]: > Nov 28 16:26:36 crc kubenswrapper[4647]: I1128 16:26:36.929052 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:36 crc kubenswrapper[4647]: I1128 16:26:36.979021 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:37 crc kubenswrapper[4647]: I1128 16:26:37.148147 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:37 crc kubenswrapper[4647]: I1128 16:26:37.394597 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:26:37 crc kubenswrapper[4647]: E1128 16:26:37.395022 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:26:38 crc kubenswrapper[4647]: I1128 16:26:38.272792 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rmb87" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="registry-server" containerID="cri-o://c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477" gracePeriod=2 Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.027998 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.099171 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content\") pod \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.099239 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities\") pod \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.099308 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qx42k\" (UniqueName: \"kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k\") pod \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\" (UID: \"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74\") " Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.100839 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities" (OuterVolumeSpecName: "utilities") pod "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" (UID: "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.102078 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.119898 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k" (OuterVolumeSpecName: "kube-api-access-qx42k") pod "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" (UID: "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74"). InnerVolumeSpecName "kube-api-access-qx42k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.203889 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qx42k\" (UniqueName: \"kubernetes.io/projected/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-kube-api-access-qx42k\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.237977 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" (UID: "cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.286855 4647 generic.go:334] "Generic (PLEG): container finished" podID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerID="c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477" exitCode=0 Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.286920 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerDied","Data":"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477"} Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.286959 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rmb87" event={"ID":"cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74","Type":"ContainerDied","Data":"255de43bfad6c7ea2aabe7c09ec890337ed7e54643a564aa3f32d334905ffb26"} Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.286956 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rmb87" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.286980 4647 scope.go:117] "RemoveContainer" containerID="c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.305487 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.319002 4647 scope.go:117] "RemoveContainer" containerID="dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.351514 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.360291 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rmb87"] Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.377076 4647 scope.go:117] "RemoveContainer" containerID="bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.408404 4647 scope.go:117] "RemoveContainer" containerID="c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477" Nov 28 16:26:39 crc kubenswrapper[4647]: E1128 16:26:39.409994 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477\": container with ID starting with c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477 not found: ID does not exist" containerID="c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.410032 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477"} err="failed to get container status \"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477\": rpc error: code = NotFound desc = could not find container \"c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477\": container with ID starting with c1e27184475f812d2d262598a9e91560d5f1622bcf2b0e63d1dac5168ec00477 not found: ID does not exist" Nov 28 16:26:39 crc 
kubenswrapper[4647]: I1128 16:26:39.410057 4647 scope.go:117] "RemoveContainer" containerID="dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e" Nov 28 16:26:39 crc kubenswrapper[4647]: E1128 16:26:39.410530 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e\": container with ID starting with dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e not found: ID does not exist" containerID="dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.410547 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e"} err="failed to get container status \"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e\": rpc error: code = NotFound desc = could not find container \"dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e\": container with ID starting with dc326136f0aa2a85901693e19bf422255d5e6cfd9eadf1b0c64659fe193c488e not found: ID does not exist" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.410559 4647 scope.go:117] "RemoveContainer" containerID="bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43" Nov 28 16:26:39 crc kubenswrapper[4647]: E1128 16:26:39.411061 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43\": container with ID starting with bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43 not found: ID does not exist" containerID="bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43" Nov 28 16:26:39 crc kubenswrapper[4647]: I1128 16:26:39.411106 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43"} err="failed to get container status \"bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43\": rpc error: code = NotFound desc = could not find container \"bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43\": container with ID starting with bbd459281032c72e00b28817ce23da176e3517eeee6a53c3dbab3613132e3f43 not found: ID does not exist" Nov 28 16:26:40 crc kubenswrapper[4647]: I1128 16:26:40.404927 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" path="/var/lib/kubelet/pods/cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74/volumes" Nov 28 16:26:48 crc kubenswrapper[4647]: I1128 16:26:48.395829 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:26:48 crc kubenswrapper[4647]: E1128 16:26:48.397366 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:27:01 crc kubenswrapper[4647]: I1128 16:27:01.395342 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" 
Nov 28 16:27:01 crc kubenswrapper[4647]: E1128 16:27:01.397028 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:27:16 crc kubenswrapper[4647]: I1128 16:27:16.395999 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:27:16 crc kubenswrapper[4647]: E1128 16:27:16.398922 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:27:27 crc kubenswrapper[4647]: I1128 16:27:27.394868 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:27:27 crc kubenswrapper[4647]: E1128 16:27:27.396836 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:27:42 crc kubenswrapper[4647]: I1128 16:27:42.396581 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:27:42 crc kubenswrapper[4647]: E1128 16:27:42.398508 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:27:56 crc kubenswrapper[4647]: I1128 16:27:56.395217 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:27:56 crc kubenswrapper[4647]: E1128 16:27:56.396330 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:28:11 crc kubenswrapper[4647]: I1128 16:28:11.395624 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:28:11 crc kubenswrapper[4647]: E1128 16:28:11.396487 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:28:24 crc kubenswrapper[4647]: I1128 16:28:24.394776 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:28:24 crc kubenswrapper[4647]: E1128 16:28:24.395450 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:28:39 crc kubenswrapper[4647]: I1128 16:28:39.394396 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:28:39 crc kubenswrapper[4647]: E1128 16:28:39.395201 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:28:51 crc kubenswrapper[4647]: I1128 16:28:51.395381 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:28:51 crc kubenswrapper[4647]: I1128 16:28:51.756936 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1"} Nov 28 16:29:10 crc kubenswrapper[4647]: I1128 16:29:10.307801 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-glkws" podUID="00e78d31-cbbb-4ee5-b687-14e01b2761df" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.196187 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4"] Nov 28 16:30:00 crc kubenswrapper[4647]: E1128 16:30:00.199807 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="extract-content" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.199842 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="extract-content" Nov 28 16:30:00 crc kubenswrapper[4647]: E1128 16:30:00.199874 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="registry-server" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.199880 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="registry-server" Nov 28 16:30:00 crc kubenswrapper[4647]: E1128 16:30:00.199904 4647 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="extract-utilities" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.199912 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="extract-utilities" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.200182 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd86c33e-5a39-4ec6-8559-dfc2bc9f5b74" containerName="registry-server" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.202625 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.210635 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.213934 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.225349 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4"] Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.277555 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.277617 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49bxn\" (UniqueName: \"kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.277811 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.380057 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.380122 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49bxn\" (UniqueName: \"kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc 
kubenswrapper[4647]: I1128 16:30:00.380240 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.381332 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.393390 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.402401 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49bxn\" (UniqueName: \"kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn\") pod \"collect-profiles-29405790-gwsd4\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:00 crc kubenswrapper[4647]: I1128 16:30:00.538609 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:01 crc kubenswrapper[4647]: I1128 16:30:01.074865 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4"] Nov 28 16:30:01 crc kubenswrapper[4647]: I1128 16:30:01.248589 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" event={"ID":"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f","Type":"ContainerStarted","Data":"6e602fa59bf8cb7fa465d7dc8bcbc9d5f1ce74604fbe1c42c6464caaeb5a6365"} Nov 28 16:30:02 crc kubenswrapper[4647]: I1128 16:30:02.262662 4647 generic.go:334] "Generic (PLEG): container finished" podID="b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" containerID="faf2c6c30f0653a64aac51f8ade6552fdc01a5f0c876036ce447534a28de6dcd" exitCode=0 Nov 28 16:30:02 crc kubenswrapper[4647]: I1128 16:30:02.262783 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" event={"ID":"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f","Type":"ContainerDied","Data":"faf2c6c30f0653a64aac51f8ade6552fdc01a5f0c876036ce447534a28de6dcd"} Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.829768 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.887025 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49bxn\" (UniqueName: \"kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn\") pod \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.887176 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume\") pod \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.887241 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume\") pod \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\" (UID: \"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f\") " Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.891852 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume" (OuterVolumeSpecName: "config-volume") pod "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" (UID: "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.897551 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" (UID: "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.937682 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn" (OuterVolumeSpecName: "kube-api-access-49bxn") pod "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" (UID: "b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f"). InnerVolumeSpecName "kube-api-access-49bxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.989507 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49bxn\" (UniqueName: \"kubernetes.io/projected/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-kube-api-access-49bxn\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.989870 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:03 crc kubenswrapper[4647]: I1128 16:30:03.989951 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:04 crc kubenswrapper[4647]: I1128 16:30:04.285555 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" event={"ID":"b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f","Type":"ContainerDied","Data":"6e602fa59bf8cb7fa465d7dc8bcbc9d5f1ce74604fbe1c42c6464caaeb5a6365"} Nov 28 16:30:04 crc kubenswrapper[4647]: I1128 16:30:04.285688 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-gwsd4" Nov 28 16:30:04 crc kubenswrapper[4647]: I1128 16:30:04.285613 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e602fa59bf8cb7fa465d7dc8bcbc9d5f1ce74604fbe1c42c6464caaeb5a6365" Nov 28 16:30:04 crc kubenswrapper[4647]: I1128 16:30:04.931564 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2"] Nov 28 16:30:04 crc kubenswrapper[4647]: I1128 16:30:04.941048 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-xjhw2"] Nov 28 16:30:06 crc kubenswrapper[4647]: I1128 16:30:06.412820 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6559e55c-8d5f-4838-a3d6-585ce3815b9e" path="/var/lib/kubelet/pods/6559e55c-8d5f-4838-a3d6-585ce3815b9e/volumes" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.571954 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:09 crc kubenswrapper[4647]: E1128 16:30:09.573676 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" containerName="collect-profiles" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.573700 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" containerName="collect-profiles" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.574041 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b50d4e7e-3a90-4a8b-b7e0-c5f203cfc79f" containerName="collect-profiles" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.576478 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.584925 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.660982 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6djp\" (UniqueName: \"kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.661178 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.661211 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.763283 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.763563 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.763689 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6djp\" (UniqueName: \"kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.764186 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.764427 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.785743 4647 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-l6djp\" (UniqueName: \"kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp\") pod \"redhat-marketplace-jtrxt\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:09 crc kubenswrapper[4647]: I1128 16:30:09.905590 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:10 crc kubenswrapper[4647]: I1128 16:30:10.501208 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:11 crc kubenswrapper[4647]: I1128 16:30:11.366627 4647 generic.go:334] "Generic (PLEG): container finished" podID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerID="0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952" exitCode=0 Nov 28 16:30:11 crc kubenswrapper[4647]: I1128 16:30:11.366735 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerDied","Data":"0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952"} Nov 28 16:30:11 crc kubenswrapper[4647]: I1128 16:30:11.367129 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerStarted","Data":"192cb525598ac5aecb1dca2ad84f90ed9ba9a0cbd67a220018ed1792bf5e8e27"} Nov 28 16:30:11 crc kubenswrapper[4647]: I1128 16:30:11.370184 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:30:13 crc kubenswrapper[4647]: I1128 16:30:13.399818 4647 generic.go:334] "Generic (PLEG): container finished" podID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerID="1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15" exitCode=0 Nov 28 16:30:13 crc kubenswrapper[4647]: I1128 16:30:13.399905 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerDied","Data":"1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15"} Nov 28 16:30:14 crc kubenswrapper[4647]: I1128 16:30:14.418050 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerStarted","Data":"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890"} Nov 28 16:30:14 crc kubenswrapper[4647]: I1128 16:30:14.440403 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jtrxt" podStartSLOduration=2.96363206 podStartE2EDuration="5.440388335s" podCreationTimestamp="2025-11-28 16:30:09 +0000 UTC" firstStartedPulling="2025-11-28 16:30:11.369862358 +0000 UTC m=+3941.217468779" lastFinishedPulling="2025-11-28 16:30:13.846618632 +0000 UTC m=+3943.694225054" observedRunningTime="2025-11-28 16:30:14.436329687 +0000 UTC m=+3944.283936128" watchObservedRunningTime="2025-11-28 16:30:14.440388335 +0000 UTC m=+3944.287994756" Nov 28 16:30:19 crc kubenswrapper[4647]: I1128 16:30:19.214985 4647 scope.go:117] "RemoveContainer" containerID="89159b9e18a6b502faf68ae1a1108a56f4b6fe58855e9cf32714323932c4cefa" Nov 28 16:30:19 crc kubenswrapper[4647]: I1128 16:30:19.905844 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:19 crc kubenswrapper[4647]: I1128 16:30:19.906506 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:19 crc kubenswrapper[4647]: I1128 16:30:19.962619 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:20 crc kubenswrapper[4647]: I1128 16:30:20.553326 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:20 crc kubenswrapper[4647]: I1128 16:30:20.614173 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:22 crc kubenswrapper[4647]: I1128 16:30:22.941121 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jtrxt" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="registry-server" containerID="cri-o://f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890" gracePeriod=2 Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.668090 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.864909 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content\") pod \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.865545 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities\") pod \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.865902 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6djp\" (UniqueName: \"kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp\") pod \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\" (UID: \"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13\") " Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.866646 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities" (OuterVolumeSpecName: "utilities") pod "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" (UID: "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.877225 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp" (OuterVolumeSpecName: "kube-api-access-l6djp") pod "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" (UID: "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13"). InnerVolumeSpecName "kube-api-access-l6djp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.895860 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" (UID: "a629f9b2-a85b-43ac-a0bd-2f65ccbadf13"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.953179 4647 generic.go:334] "Generic (PLEG): container finished" podID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerID="f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890" exitCode=0 Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.953713 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtrxt" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.953765 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerDied","Data":"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890"} Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.954616 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtrxt" event={"ID":"a629f9b2-a85b-43ac-a0bd-2f65ccbadf13","Type":"ContainerDied","Data":"192cb525598ac5aecb1dca2ad84f90ed9ba9a0cbd67a220018ed1792bf5e8e27"} Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.954658 4647 scope.go:117] "RemoveContainer" containerID="f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.971564 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.971624 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6djp\" (UniqueName: \"kubernetes.io/projected/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-kube-api-access-l6djp\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.971642 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:23 crc kubenswrapper[4647]: I1128 16:30:23.988873 4647 scope.go:117] "RemoveContainer" containerID="1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.021404 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.029769 4647 scope.go:117] "RemoveContainer" containerID="0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.035147 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtrxt"] Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.070822 4647 scope.go:117] "RemoveContainer" containerID="f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890" Nov 28 16:30:24 crc kubenswrapper[4647]: E1128 16:30:24.075709 4647 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890\": container with ID starting with f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890 not found: ID does not exist" containerID="f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.075751 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890"} err="failed to get container status \"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890\": rpc error: code = NotFound desc = could not find container \"f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890\": container with ID starting with f66f4cc19aa481ce986802633aeb2b46a75c45e1abd7a95d8ed1bf33199de890 not found: ID does not exist" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.075785 4647 scope.go:117] "RemoveContainer" containerID="1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15" Nov 28 16:30:24 crc kubenswrapper[4647]: E1128 16:30:24.077020 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15\": container with ID starting with 1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15 not found: ID does not exist" containerID="1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.077057 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15"} err="failed to get container status \"1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15\": rpc error: code = NotFound desc = could not find container \"1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15\": container with ID starting with 1a85d63139ceaf8fe53f08c3459e802ac5c2d5d9acfacda4f418dae040806f15 not found: ID does not exist" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.077074 4647 scope.go:117] "RemoveContainer" containerID="0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952" Nov 28 16:30:24 crc kubenswrapper[4647]: E1128 16:30:24.077553 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952\": container with ID starting with 0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952 not found: ID does not exist" containerID="0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.077608 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952"} err="failed to get container status \"0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952\": rpc error: code = NotFound desc = could not find container \"0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952\": container with ID starting with 0f40c01f3fcb0dccbfcaadc694d670aee9055221d08966f0b38e76bc0f1bc952 not found: ID does not exist" Nov 28 16:30:24 crc kubenswrapper[4647]: I1128 16:30:24.412256 4647 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" path="/var/lib/kubelet/pods/a629f9b2-a85b-43ac-a0bd-2f65ccbadf13/volumes" Nov 28 16:31:17 crc kubenswrapper[4647]: I1128 16:31:17.023303 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:31:17 crc kubenswrapper[4647]: I1128 16:31:17.023891 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.316955 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:31:36 crc kubenswrapper[4647]: E1128 16:31:36.318906 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="registry-server" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.318986 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="registry-server" Nov 28 16:31:36 crc kubenswrapper[4647]: E1128 16:31:36.319056 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="extract-content" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.319108 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="extract-content" Nov 28 16:31:36 crc kubenswrapper[4647]: E1128 16:31:36.319174 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="extract-utilities" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.319230 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="extract-utilities" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.319481 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a629f9b2-a85b-43ac-a0bd-2f65ccbadf13" containerName="registry-server" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.320972 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.328535 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.328643 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.328849 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq9qt\" (UniqueName: \"kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.344149 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.431166 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq9qt\" (UniqueName: \"kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.431234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.431352 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.432184 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.432488 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.458585 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xq9qt\" (UniqueName: \"kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt\") pod \"certified-operators-gdr9c\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:36 crc kubenswrapper[4647]: I1128 16:31:36.642046 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:37 crc kubenswrapper[4647]: I1128 16:31:37.251570 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:31:37 crc kubenswrapper[4647]: W1128 16:31:37.262995 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefd6a9c0_e68b_403c_9c5c_7b88c4356c1e.slice/crio-f5b0c6a94ca1eb537e29ba8ee3e366b807b9f01626cfcd6ceb28cac4e0967d21 WatchSource:0}: Error finding container f5b0c6a94ca1eb537e29ba8ee3e366b807b9f01626cfcd6ceb28cac4e0967d21: Status 404 returned error can't find the container with id f5b0c6a94ca1eb537e29ba8ee3e366b807b9f01626cfcd6ceb28cac4e0967d21 Nov 28 16:31:37 crc kubenswrapper[4647]: I1128 16:31:37.743940 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerStarted","Data":"f5b0c6a94ca1eb537e29ba8ee3e366b807b9f01626cfcd6ceb28cac4e0967d21"} Nov 28 16:31:38 crc kubenswrapper[4647]: I1128 16:31:38.754770 4647 generic.go:334] "Generic (PLEG): container finished" podID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerID="8607dd339b21bca899485683abb2f21792c3fdc5751862ba7d9a4baf22906be2" exitCode=0 Nov 28 16:31:38 crc kubenswrapper[4647]: I1128 16:31:38.754823 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerDied","Data":"8607dd339b21bca899485683abb2f21792c3fdc5751862ba7d9a4baf22906be2"} Nov 28 16:31:41 crc kubenswrapper[4647]: I1128 16:31:41.795021 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerStarted","Data":"d3cff1f0e19639947860b34533cb30b2d02dc6408defeb3d9b67df2c068a2481"} Nov 28 16:31:42 crc kubenswrapper[4647]: I1128 16:31:42.810361 4647 generic.go:334] "Generic (PLEG): container finished" podID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerID="d3cff1f0e19639947860b34533cb30b2d02dc6408defeb3d9b67df2c068a2481" exitCode=0 Nov 28 16:31:42 crc kubenswrapper[4647]: I1128 16:31:42.810573 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerDied","Data":"d3cff1f0e19639947860b34533cb30b2d02dc6408defeb3d9b67df2c068a2481"} Nov 28 16:31:45 crc kubenswrapper[4647]: I1128 16:31:45.521510 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerStarted","Data":"004c9f9494d905f94bb45a55eab0ad34eee4ede89bdfdf9fc20afa189a428e11"} Nov 28 16:31:45 crc kubenswrapper[4647]: I1128 16:31:45.547601 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gdr9c" 
podStartSLOduration=3.2513051219999998 podStartE2EDuration="9.547577597s" podCreationTimestamp="2025-11-28 16:31:36 +0000 UTC" firstStartedPulling="2025-11-28 16:31:38.757259748 +0000 UTC m=+4028.604866169" lastFinishedPulling="2025-11-28 16:31:45.053532223 +0000 UTC m=+4034.901138644" observedRunningTime="2025-11-28 16:31:45.539061341 +0000 UTC m=+4035.386667772" watchObservedRunningTime="2025-11-28 16:31:45.547577597 +0000 UTC m=+4035.395184048" Nov 28 16:31:46 crc kubenswrapper[4647]: I1128 16:31:46.642962 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:46 crc kubenswrapper[4647]: I1128 16:31:46.643332 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:47 crc kubenswrapper[4647]: I1128 16:31:47.022706 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:31:47 crc kubenswrapper[4647]: I1128 16:31:47.022777 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:31:47 crc kubenswrapper[4647]: I1128 16:31:47.694093 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-gdr9c" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="registry-server" probeResult="failure" output=< Nov 28 16:31:47 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:31:47 crc kubenswrapper[4647]: > Nov 28 16:31:56 crc kubenswrapper[4647]: I1128 16:31:56.696013 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:56 crc kubenswrapper[4647]: I1128 16:31:56.760768 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.363466 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.368239 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.399373 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.536870 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.537293 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.537543 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dpq7\" (UniqueName: \"kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.639845 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dpq7\" (UniqueName: \"kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.639999 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.640057 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.640553 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.640777 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.680602 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8dpq7\" (UniqueName: \"kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7\") pod \"community-operators-c5cnz\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:57 crc kubenswrapper[4647]: I1128 16:31:57.709900 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:31:58 crc kubenswrapper[4647]: I1128 16:31:58.539069 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:31:58 crc kubenswrapper[4647]: W1128 16:31:58.556235 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod062fd167_6639_4714_aafa_8550731b2995.slice/crio-fe1f50df8991d248d01a6cb2b2fe22b56747b2851bff342cf90e6ae3f3b5963f WatchSource:0}: Error finding container fe1f50df8991d248d01a6cb2b2fe22b56747b2851bff342cf90e6ae3f3b5963f: Status 404 returned error can't find the container with id fe1f50df8991d248d01a6cb2b2fe22b56747b2851bff342cf90e6ae3f3b5963f Nov 28 16:31:58 crc kubenswrapper[4647]: I1128 16:31:58.683289 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerStarted","Data":"fe1f50df8991d248d01a6cb2b2fe22b56747b2851bff342cf90e6ae3f3b5963f"} Nov 28 16:31:59 crc kubenswrapper[4647]: I1128 16:31:59.695875 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerStarted","Data":"f36a7d853bde9a1c650d7ae8fca1bd03f7dc0d287cfd5622503b7ce2a1fd3906"} Nov 28 16:32:00 crc kubenswrapper[4647]: I1128 16:32:00.709517 4647 generic.go:334] "Generic (PLEG): container finished" podID="062fd167-6639-4714-aafa-8550731b2995" containerID="f36a7d853bde9a1c650d7ae8fca1bd03f7dc0d287cfd5622503b7ce2a1fd3906" exitCode=0 Nov 28 16:32:00 crc kubenswrapper[4647]: I1128 16:32:00.709654 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerDied","Data":"f36a7d853bde9a1c650d7ae8fca1bd03f7dc0d287cfd5622503b7ce2a1fd3906"} Nov 28 16:32:01 crc kubenswrapper[4647]: I1128 16:32:01.166126 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:32:01 crc kubenswrapper[4647]: I1128 16:32:01.166970 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gdr9c" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="registry-server" containerID="cri-o://004c9f9494d905f94bb45a55eab0ad34eee4ede89bdfdf9fc20afa189a428e11" gracePeriod=2 Nov 28 16:32:01 crc kubenswrapper[4647]: I1128 16:32:01.723346 4647 generic.go:334] "Generic (PLEG): container finished" podID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerID="004c9f9494d905f94bb45a55eab0ad34eee4ede89bdfdf9fc20afa189a428e11" exitCode=0 Nov 28 16:32:01 crc kubenswrapper[4647]: I1128 16:32:01.723404 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" 
event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerDied","Data":"004c9f9494d905f94bb45a55eab0ad34eee4ede89bdfdf9fc20afa189a428e11"} Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.396449 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.571488 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content\") pod \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.571879 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities\") pod \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.571934 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq9qt\" (UniqueName: \"kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt\") pod \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\" (UID: \"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e\") " Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.575684 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities" (OuterVolumeSpecName: "utilities") pod "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" (UID: "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.585296 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt" (OuterVolumeSpecName: "kube-api-access-xq9qt") pod "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" (UID: "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e"). InnerVolumeSpecName "kube-api-access-xq9qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.650109 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" (UID: "efd6a9c0-e68b-403c-9c5c-7b88c4356c1e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.674290 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.674328 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq9qt\" (UniqueName: \"kubernetes.io/projected/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-kube-api-access-xq9qt\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.674339 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.736084 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerStarted","Data":"2a8184d18ed15761ea4fbf58233c3a44010793fdcd08c934cf41d7b0e9673c18"} Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.741751 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gdr9c" event={"ID":"efd6a9c0-e68b-403c-9c5c-7b88c4356c1e","Type":"ContainerDied","Data":"f5b0c6a94ca1eb537e29ba8ee3e366b807b9f01626cfcd6ceb28cac4e0967d21"} Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.741984 4647 scope.go:117] "RemoveContainer" containerID="004c9f9494d905f94bb45a55eab0ad34eee4ede89bdfdf9fc20afa189a428e11" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.741805 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gdr9c" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.771199 4647 scope.go:117] "RemoveContainer" containerID="d3cff1f0e19639947860b34533cb30b2d02dc6408defeb3d9b67df2c068a2481" Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.794655 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.824056 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gdr9c"] Nov 28 16:32:02 crc kubenswrapper[4647]: I1128 16:32:02.825633 4647 scope.go:117] "RemoveContainer" containerID="8607dd339b21bca899485683abb2f21792c3fdc5751862ba7d9a4baf22906be2" Nov 28 16:32:04 crc kubenswrapper[4647]: I1128 16:32:04.410537 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" path="/var/lib/kubelet/pods/efd6a9c0-e68b-403c-9c5c-7b88c4356c1e/volumes" Nov 28 16:32:04 crc kubenswrapper[4647]: I1128 16:32:04.772245 4647 generic.go:334] "Generic (PLEG): container finished" podID="062fd167-6639-4714-aafa-8550731b2995" containerID="2a8184d18ed15761ea4fbf58233c3a44010793fdcd08c934cf41d7b0e9673c18" exitCode=0 Nov 28 16:32:04 crc kubenswrapper[4647]: I1128 16:32:04.772288 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerDied","Data":"2a8184d18ed15761ea4fbf58233c3a44010793fdcd08c934cf41d7b0e9673c18"} Nov 28 16:32:05 crc kubenswrapper[4647]: I1128 16:32:05.786935 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerStarted","Data":"3a1787dfbff57489f318b2511a3c9869b42b91269838b949fb699864584011ca"} Nov 28 16:32:05 crc kubenswrapper[4647]: I1128 16:32:05.815366 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c5cnz" podStartSLOduration=4.247680234 podStartE2EDuration="8.815346049s" podCreationTimestamp="2025-11-28 16:31:57 +0000 UTC" firstStartedPulling="2025-11-28 16:32:00.713197597 +0000 UTC m=+4050.560804018" lastFinishedPulling="2025-11-28 16:32:05.280863412 +0000 UTC m=+4055.128469833" observedRunningTime="2025-11-28 16:32:05.808790145 +0000 UTC m=+4055.656396566" watchObservedRunningTime="2025-11-28 16:32:05.815346049 +0000 UTC m=+4055.662952470" Nov 28 16:32:07 crc kubenswrapper[4647]: I1128 16:32:07.710067 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:07 crc kubenswrapper[4647]: I1128 16:32:07.710512 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:07 crc kubenswrapper[4647]: I1128 16:32:07.773071 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.022794 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 
16:32:17.023512 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.023574 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.024506 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.024577 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1" gracePeriod=600 Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.771316 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.839898 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.927725 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1" exitCode=0 Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.928040 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c5cnz" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="registry-server" containerID="cri-o://3a1787dfbff57489f318b2511a3c9869b42b91269838b949fb699864584011ca" gracePeriod=2 Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.928161 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1"} Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.928213 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83"} Nov 28 16:32:17 crc kubenswrapper[4647]: I1128 16:32:17.928241 4647 scope.go:117] "RemoveContainer" containerID="37d8a1e2b2c4d2e776008934497ce5446ace59bbf4fad9169bdc2666c0ccce43" Nov 28 16:32:18 crc kubenswrapper[4647]: I1128 16:32:18.977582 4647 generic.go:334] "Generic (PLEG): container finished" podID="062fd167-6639-4714-aafa-8550731b2995" containerID="3a1787dfbff57489f318b2511a3c9869b42b91269838b949fb699864584011ca" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4647]: I1128 16:32:18.977769 4647 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerDied","Data":"3a1787dfbff57489f318b2511a3c9869b42b91269838b949fb699864584011ca"} Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.542665 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.711965 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities\") pod \"062fd167-6639-4714-aafa-8550731b2995\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.712527 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content\") pod \"062fd167-6639-4714-aafa-8550731b2995\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.712665 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dpq7\" (UniqueName: \"kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7\") pod \"062fd167-6639-4714-aafa-8550731b2995\" (UID: \"062fd167-6639-4714-aafa-8550731b2995\") " Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.712982 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities" (OuterVolumeSpecName: "utilities") pod "062fd167-6639-4714-aafa-8550731b2995" (UID: "062fd167-6639-4714-aafa-8550731b2995"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.713721 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.724903 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7" (OuterVolumeSpecName: "kube-api-access-8dpq7") pod "062fd167-6639-4714-aafa-8550731b2995" (UID: "062fd167-6639-4714-aafa-8550731b2995"). InnerVolumeSpecName "kube-api-access-8dpq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.783071 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "062fd167-6639-4714-aafa-8550731b2995" (UID: "062fd167-6639-4714-aafa-8550731b2995"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.815611 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/062fd167-6639-4714-aafa-8550731b2995-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.815661 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dpq7\" (UniqueName: \"kubernetes.io/projected/062fd167-6639-4714-aafa-8550731b2995-kube-api-access-8dpq7\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.996423 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5cnz" event={"ID":"062fd167-6639-4714-aafa-8550731b2995","Type":"ContainerDied","Data":"fe1f50df8991d248d01a6cb2b2fe22b56747b2851bff342cf90e6ae3f3b5963f"} Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.996487 4647 scope.go:117] "RemoveContainer" containerID="3a1787dfbff57489f318b2511a3c9869b42b91269838b949fb699864584011ca" Nov 28 16:32:19 crc kubenswrapper[4647]: I1128 16:32:19.996652 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c5cnz" Nov 28 16:32:20 crc kubenswrapper[4647]: I1128 16:32:20.048883 4647 scope.go:117] "RemoveContainer" containerID="2a8184d18ed15761ea4fbf58233c3a44010793fdcd08c934cf41d7b0e9673c18" Nov 28 16:32:20 crc kubenswrapper[4647]: I1128 16:32:20.063029 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:32:20 crc kubenswrapper[4647]: I1128 16:32:20.080261 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c5cnz"] Nov 28 16:32:20 crc kubenswrapper[4647]: I1128 16:32:20.110167 4647 scope.go:117] "RemoveContainer" containerID="f36a7d853bde9a1c650d7ae8fca1bd03f7dc0d287cfd5622503b7ce2a1fd3906" Nov 28 16:32:20 crc kubenswrapper[4647]: E1128 16:32:20.162101 4647 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod062fd167_6639_4714_aafa_8550731b2995.slice\": RecentStats: unable to find data in memory cache]" Nov 28 16:32:20 crc kubenswrapper[4647]: I1128 16:32:20.407647 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="062fd167-6639-4714-aafa-8550731b2995" path="/var/lib/kubelet/pods/062fd167-6639-4714-aafa-8550731b2995/volumes" Nov 28 16:34:17 crc kubenswrapper[4647]: I1128 16:34:17.022668 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:34:17 crc kubenswrapper[4647]: I1128 16:34:17.023779 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:34:47 crc kubenswrapper[4647]: I1128 16:34:47.022719 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:34:47 crc kubenswrapper[4647]: I1128 16:34:47.023650 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:35:17 crc kubenswrapper[4647]: I1128 16:35:17.023062 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:35:17 crc kubenswrapper[4647]: I1128 16:35:17.024539 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:35:17 crc kubenswrapper[4647]: I1128 16:35:17.024595 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:35:17 crc kubenswrapper[4647]: I1128 16:35:17.025597 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:35:17 crc kubenswrapper[4647]: I1128 16:35:17.025741 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" gracePeriod=600 Nov 28 16:35:17 crc kubenswrapper[4647]: E1128 16:35:17.147198 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:35:18 crc kubenswrapper[4647]: I1128 16:35:18.013386 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" exitCode=0 Nov 28 16:35:18 crc kubenswrapper[4647]: I1128 16:35:18.013460 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83"} Nov 28 16:35:18 crc kubenswrapper[4647]: I1128 16:35:18.013502 4647 scope.go:117] "RemoveContainer" 
containerID="69585191622efc9453f84702375ab57648d141ad8c8bdf73563158b9cd0e3fb1" Nov 28 16:35:18 crc kubenswrapper[4647]: I1128 16:35:18.014162 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:35:18 crc kubenswrapper[4647]: E1128 16:35:18.014496 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:35:30 crc kubenswrapper[4647]: I1128 16:35:30.415484 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:35:30 crc kubenswrapper[4647]: E1128 16:35:30.417323 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:35:44 crc kubenswrapper[4647]: I1128 16:35:44.394672 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:35:44 crc kubenswrapper[4647]: E1128 16:35:44.395644 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:35:57 crc kubenswrapper[4647]: I1128 16:35:57.394365 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:35:57 crc kubenswrapper[4647]: E1128 16:35:57.395195 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:36:11 crc kubenswrapper[4647]: I1128 16:36:11.395307 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:36:11 crc kubenswrapper[4647]: E1128 16:36:11.396630 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:36:26 crc kubenswrapper[4647]: I1128 16:36:26.395003 4647 scope.go:117] "RemoveContainer" 
containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:36:26 crc kubenswrapper[4647]: E1128 16:36:26.396214 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:36:39 crc kubenswrapper[4647]: I1128 16:36:39.395303 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:36:39 crc kubenswrapper[4647]: E1128 16:36:39.396497 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:36:54 crc kubenswrapper[4647]: I1128 16:36:54.395445 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:36:54 crc kubenswrapper[4647]: E1128 16:36:54.398782 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:37:05 crc kubenswrapper[4647]: I1128 16:37:05.395126 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:37:05 crc kubenswrapper[4647]: E1128 16:37:05.396105 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:37:20 crc kubenswrapper[4647]: I1128 16:37:20.403122 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:37:20 crc kubenswrapper[4647]: E1128 16:37:20.404230 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:37:34 crc kubenswrapper[4647]: I1128 16:37:34.395711 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:37:34 crc kubenswrapper[4647]: E1128 16:37:34.396450 4647 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:37:46 crc kubenswrapper[4647]: I1128 16:37:46.395104 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:37:46 crc kubenswrapper[4647]: E1128 16:37:46.395800 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:37:57 crc kubenswrapper[4647]: I1128 16:37:57.394772 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:37:57 crc kubenswrapper[4647]: E1128 16:37:57.395671 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:38:12 crc kubenswrapper[4647]: I1128 16:38:12.394221 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:38:12 crc kubenswrapper[4647]: E1128 16:38:12.394996 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:38:24 crc kubenswrapper[4647]: I1128 16:38:24.395220 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:38:24 crc kubenswrapper[4647]: E1128 16:38:24.396344 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:38:39 crc kubenswrapper[4647]: I1128 16:38:39.394599 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:38:39 crc kubenswrapper[4647]: E1128 16:38:39.396867 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:38:43 crc kubenswrapper[4647]: I1128 16:38:43.448573 4647 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2s6l7 container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 16:38:43 crc kubenswrapper[4647]: I1128 16:38:43.449127 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2s6l7" podUID="c044b720-8bbd-4d48-a61b-d37188cfa478" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:38:54 crc kubenswrapper[4647]: I1128 16:38:54.401032 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:38:54 crc kubenswrapper[4647]: E1128 16:38:54.402367 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:39:06 crc kubenswrapper[4647]: I1128 16:39:06.396807 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:39:06 crc kubenswrapper[4647]: E1128 16:39:06.398627 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:39:19 crc kubenswrapper[4647]: I1128 16:39:19.395428 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:39:19 crc kubenswrapper[4647]: E1128 16:39:19.396261 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:39:32 crc kubenswrapper[4647]: I1128 16:39:32.394177 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:39:32 crc kubenswrapper[4647]: E1128 16:39:32.394919 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:39:46 crc kubenswrapper[4647]: I1128 16:39:46.395081 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:39:46 crc kubenswrapper[4647]: E1128 16:39:46.395831 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:39:58 crc kubenswrapper[4647]: I1128 16:39:58.394290 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:39:58 crc kubenswrapper[4647]: E1128 16:39:58.395046 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:40:11 crc kubenswrapper[4647]: I1128 16:40:11.395126 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:40:11 crc kubenswrapper[4647]: E1128 16:40:11.396461 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.918161 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919216 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919229 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919262 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="extract-content" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919268 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="extract-content" Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919284 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="extract-content" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919290 4647 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="extract-content" Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919303 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919309 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919320 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="extract-utilities" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919326 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="extract-utilities" Nov 28 16:40:18 crc kubenswrapper[4647]: E1128 16:40:18.919337 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="extract-utilities" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919343 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="extract-utilities" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919561 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="062fd167-6639-4714-aafa-8550731b2995" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.919585 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="efd6a9c0-e68b-403c-9c5c-7b88c4356c1e" containerName="registry-server" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.925624 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:18 crc kubenswrapper[4647]: I1128 16:40:18.948662 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.074293 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg6wx\" (UniqueName: \"kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.074396 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.074488 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.176381 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg6wx\" (UniqueName: \"kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.176546 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.176603 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.177048 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.177064 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.203203 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pg6wx\" (UniqueName: \"kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx\") pod \"redhat-operators-485hh\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.247287 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:19 crc kubenswrapper[4647]: I1128 16:40:19.800577 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:20 crc kubenswrapper[4647]: I1128 16:40:20.225478 4647 generic.go:334] "Generic (PLEG): container finished" podID="a326cca6-473c-4395-b651-17624647d0da" containerID="7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4" exitCode=0 Nov 28 16:40:20 crc kubenswrapper[4647]: I1128 16:40:20.225637 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerDied","Data":"7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4"} Nov 28 16:40:20 crc kubenswrapper[4647]: I1128 16:40:20.225799 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerStarted","Data":"e6502b3d80ab2a6812affe4363e2f3fe88e7edbfad0bf63d10c9c37194dc6b15"} Nov 28 16:40:20 crc kubenswrapper[4647]: I1128 16:40:20.227876 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:40:22 crc kubenswrapper[4647]: I1128 16:40:22.249853 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerStarted","Data":"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850"} Nov 28 16:40:24 crc kubenswrapper[4647]: I1128 16:40:24.401907 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:40:27 crc kubenswrapper[4647]: I1128 16:40:27.339207 4647 generic.go:334] "Generic (PLEG): container finished" podID="a326cca6-473c-4395-b651-17624647d0da" containerID="6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850" exitCode=0 Nov 28 16:40:27 crc kubenswrapper[4647]: I1128 16:40:27.340040 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerDied","Data":"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850"} Nov 28 16:40:28 crc kubenswrapper[4647]: I1128 16:40:28.355588 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerStarted","Data":"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9"} Nov 28 16:40:28 crc kubenswrapper[4647]: I1128 16:40:28.359189 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b"} Nov 28 16:40:28 crc kubenswrapper[4647]: I1128 16:40:28.428576 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-485hh" podStartSLOduration=2.673271447 podStartE2EDuration="10.428321843s" podCreationTimestamp="2025-11-28 16:40:18 +0000 UTC" firstStartedPulling="2025-11-28 16:40:20.22763257 +0000 UTC m=+4550.075238991" lastFinishedPulling="2025-11-28 16:40:27.982682966 +0000 UTC m=+4557.830289387" observedRunningTime="2025-11-28 16:40:28.38263728 +0000 UTC m=+4558.230243711" watchObservedRunningTime="2025-11-28 16:40:28.428321843 +0000 UTC m=+4558.275928404" Nov 28 16:40:29 crc kubenswrapper[4647]: I1128 16:40:29.247623 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:29 crc kubenswrapper[4647]: I1128 16:40:29.248041 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:29 crc kubenswrapper[4647]: I1128 16:40:29.893798 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:29 crc kubenswrapper[4647]: I1128 16:40:29.896723 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:29 crc kubenswrapper[4647]: I1128 16:40:29.914834 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.047989 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.048108 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtvrj\" (UniqueName: \"kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.048176 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.150380 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.150558 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtvrj\" (UniqueName: \"kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.150632 4647 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.151005 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.151026 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.173314 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtvrj\" (UniqueName: \"kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj\") pod \"redhat-marketplace-9zwpb\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.216341 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.333974 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-485hh" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="registry-server" probeResult="failure" output=< Nov 28 16:40:30 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:40:30 crc kubenswrapper[4647]: > Nov 28 16:40:30 crc kubenswrapper[4647]: I1128 16:40:30.833842 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:31 crc kubenswrapper[4647]: I1128 16:40:31.412321 4647 generic.go:334] "Generic (PLEG): container finished" podID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerID="267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb" exitCode=0 Nov 28 16:40:31 crc kubenswrapper[4647]: I1128 16:40:31.412484 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerDied","Data":"267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb"} Nov 28 16:40:31 crc kubenswrapper[4647]: I1128 16:40:31.412641 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerStarted","Data":"5e4754debef32bb04060dd772e969bd4355d9a0409bd02c621472921bc460c8d"} Nov 28 16:40:33 crc kubenswrapper[4647]: I1128 16:40:33.432599 4647 generic.go:334] "Generic (PLEG): container finished" podID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerID="eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c" exitCode=0 Nov 28 16:40:33 crc kubenswrapper[4647]: I1128 16:40:33.432653 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerDied","Data":"eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c"} Nov 28 16:40:35 crc kubenswrapper[4647]: I1128 16:40:35.455450 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerStarted","Data":"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f"} Nov 28 16:40:35 crc kubenswrapper[4647]: I1128 16:40:35.477227 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9zwpb" podStartSLOduration=3.48838383 podStartE2EDuration="6.477203248s" podCreationTimestamp="2025-11-28 16:40:29 +0000 UTC" firstStartedPulling="2025-11-28 16:40:31.414135251 +0000 UTC m=+4561.261741672" lastFinishedPulling="2025-11-28 16:40:34.402954669 +0000 UTC m=+4564.250561090" observedRunningTime="2025-11-28 16:40:35.476310694 +0000 UTC m=+4565.323917115" watchObservedRunningTime="2025-11-28 16:40:35.477203248 +0000 UTC m=+4565.324809669" Nov 28 16:40:39 crc kubenswrapper[4647]: I1128 16:40:39.305436 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:39 crc kubenswrapper[4647]: I1128 16:40:39.370073 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:39 crc kubenswrapper[4647]: I1128 16:40:39.879435 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:40 crc kubenswrapper[4647]: I1128 16:40:40.216517 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:40 crc kubenswrapper[4647]: I1128 16:40:40.216575 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:40 crc kubenswrapper[4647]: I1128 16:40:40.272364 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:40 crc kubenswrapper[4647]: I1128 16:40:40.499651 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-485hh" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="registry-server" containerID="cri-o://f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9" gracePeriod=2 Nov 28 16:40:40 crc kubenswrapper[4647]: I1128 16:40:40.552898 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.013739 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.098383 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg6wx\" (UniqueName: \"kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx\") pod \"a326cca6-473c-4395-b651-17624647d0da\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.098565 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities\") pod \"a326cca6-473c-4395-b651-17624647d0da\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.098776 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content\") pod \"a326cca6-473c-4395-b651-17624647d0da\" (UID: \"a326cca6-473c-4395-b651-17624647d0da\") " Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.099525 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities" (OuterVolumeSpecName: "utilities") pod "a326cca6-473c-4395-b651-17624647d0da" (UID: "a326cca6-473c-4395-b651-17624647d0da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.121975 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx" (OuterVolumeSpecName: "kube-api-access-pg6wx") pod "a326cca6-473c-4395-b651-17624647d0da" (UID: "a326cca6-473c-4395-b651-17624647d0da"). InnerVolumeSpecName "kube-api-access-pg6wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.124267 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg6wx\" (UniqueName: \"kubernetes.io/projected/a326cca6-473c-4395-b651-17624647d0da-kube-api-access-pg6wx\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.124313 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.241789 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a326cca6-473c-4395-b651-17624647d0da" (UID: "a326cca6-473c-4395-b651-17624647d0da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.328519 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a326cca6-473c-4395-b651-17624647d0da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.514564 4647 generic.go:334] "Generic (PLEG): container finished" podID="a326cca6-473c-4395-b651-17624647d0da" containerID="f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9" exitCode=0 Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.514844 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerDied","Data":"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9"} Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.514906 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-485hh" event={"ID":"a326cca6-473c-4395-b651-17624647d0da","Type":"ContainerDied","Data":"e6502b3d80ab2a6812affe4363e2f3fe88e7edbfad0bf63d10c9c37194dc6b15"} Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.514932 4647 scope.go:117] "RemoveContainer" containerID="f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.514939 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-485hh" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.556397 4647 scope.go:117] "RemoveContainer" containerID="6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.570880 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.582716 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-485hh"] Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.601611 4647 scope.go:117] "RemoveContainer" containerID="7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.633919 4647 scope.go:117] "RemoveContainer" containerID="f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9" Nov 28 16:40:41 crc kubenswrapper[4647]: E1128 16:40:41.634903 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9\": container with ID starting with f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9 not found: ID does not exist" containerID="f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.634963 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9"} err="failed to get container status \"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9\": rpc error: code = NotFound desc = could not find container \"f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9\": container with ID starting with f6046ce205b55b64e4988011b360f4074b8b772ab9e415aba596a3eb6c0c56c9 not found: ID does not exist" Nov 28 16:40:41 crc 
kubenswrapper[4647]: I1128 16:40:41.635002 4647 scope.go:117] "RemoveContainer" containerID="6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850" Nov 28 16:40:41 crc kubenswrapper[4647]: E1128 16:40:41.635500 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850\": container with ID starting with 6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850 not found: ID does not exist" containerID="6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.635636 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850"} err="failed to get container status \"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850\": rpc error: code = NotFound desc = could not find container \"6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850\": container with ID starting with 6c89036425766a0fabd187979071e8e5cb74169e6f8bb6cab6c518e8e74c3850 not found: ID does not exist" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.635749 4647 scope.go:117] "RemoveContainer" containerID="7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4" Nov 28 16:40:41 crc kubenswrapper[4647]: E1128 16:40:41.636207 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4\": container with ID starting with 7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4 not found: ID does not exist" containerID="7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4" Nov 28 16:40:41 crc kubenswrapper[4647]: I1128 16:40:41.636248 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4"} err="failed to get container status \"7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4\": rpc error: code = NotFound desc = could not find container \"7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4\": container with ID starting with 7b2ae744544b3d928f527d52d8826ba1f3afef85528f9413930ecdfdfed8aae4 not found: ID does not exist" Nov 28 16:40:42 crc kubenswrapper[4647]: I1128 16:40:42.412078 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a326cca6-473c-4395-b651-17624647d0da" path="/var/lib/kubelet/pods/a326cca6-473c-4395-b651-17624647d0da/volumes" Nov 28 16:40:42 crc kubenswrapper[4647]: I1128 16:40:42.680583 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:42 crc kubenswrapper[4647]: I1128 16:40:42.680811 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9zwpb" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="registry-server" containerID="cri-o://41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f" gracePeriod=2 Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.424922 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.480634 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities\") pod \"4911f59f-f909-4620-8721-fe3ebf49fc0a\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.480704 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content\") pod \"4911f59f-f909-4620-8721-fe3ebf49fc0a\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.480988 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtvrj\" (UniqueName: \"kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj\") pod \"4911f59f-f909-4620-8721-fe3ebf49fc0a\" (UID: \"4911f59f-f909-4620-8721-fe3ebf49fc0a\") " Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.481675 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities" (OuterVolumeSpecName: "utilities") pod "4911f59f-f909-4620-8721-fe3ebf49fc0a" (UID: "4911f59f-f909-4620-8721-fe3ebf49fc0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.488511 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj" (OuterVolumeSpecName: "kube-api-access-mtvrj") pod "4911f59f-f909-4620-8721-fe3ebf49fc0a" (UID: "4911f59f-f909-4620-8721-fe3ebf49fc0a"). InnerVolumeSpecName "kube-api-access-mtvrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.509080 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4911f59f-f909-4620-8721-fe3ebf49fc0a" (UID: "4911f59f-f909-4620-8721-fe3ebf49fc0a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.539260 4647 generic.go:334] "Generic (PLEG): container finished" podID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerID="41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f" exitCode=0 Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.539305 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerDied","Data":"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f"} Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.539332 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zwpb" event={"ID":"4911f59f-f909-4620-8721-fe3ebf49fc0a","Type":"ContainerDied","Data":"5e4754debef32bb04060dd772e969bd4355d9a0409bd02c621472921bc460c8d"} Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.539348 4647 scope.go:117] "RemoveContainer" containerID="41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.539471 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zwpb" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.587774 4647 scope.go:117] "RemoveContainer" containerID="eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.588276 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.588322 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4911f59f-f909-4620-8721-fe3ebf49fc0a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.588338 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtvrj\" (UniqueName: \"kubernetes.io/projected/4911f59f-f909-4620-8721-fe3ebf49fc0a-kube-api-access-mtvrj\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.596937 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.611928 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zwpb"] Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.636358 4647 scope.go:117] "RemoveContainer" containerID="267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.672236 4647 scope.go:117] "RemoveContainer" containerID="41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f" Nov 28 16:40:43 crc kubenswrapper[4647]: E1128 16:40:43.672712 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f\": container with ID starting with 41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f not found: ID does not exist" containerID="41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.672776 4647 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f"} err="failed to get container status \"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f\": rpc error: code = NotFound desc = could not find container \"41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f\": container with ID starting with 41e606532c1de833ad77986a31de50cb0c78e0c17ad3d54ab565bf99c0eb9f8f not found: ID does not exist" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.672808 4647 scope.go:117] "RemoveContainer" containerID="eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c" Nov 28 16:40:43 crc kubenswrapper[4647]: E1128 16:40:43.673169 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c\": container with ID starting with eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c not found: ID does not exist" containerID="eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.673194 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c"} err="failed to get container status \"eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c\": rpc error: code = NotFound desc = could not find container \"eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c\": container with ID starting with eb375b999e048b7a04da2c841b14e7cbec03d1af6431b5d2abf7451843627a7c not found: ID does not exist" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.673211 4647 scope.go:117] "RemoveContainer" containerID="267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb" Nov 28 16:40:43 crc kubenswrapper[4647]: E1128 16:40:43.673572 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb\": container with ID starting with 267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb not found: ID does not exist" containerID="267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb" Nov 28 16:40:43 crc kubenswrapper[4647]: I1128 16:40:43.673594 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb"} err="failed to get container status \"267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb\": rpc error: code = NotFound desc = could not find container \"267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb\": container with ID starting with 267578c034b918bd8f10df950eb7aa24259996ad961c9b0312cf781a39b5e1bb not found: ID does not exist" Nov 28 16:40:44 crc kubenswrapper[4647]: I1128 16:40:44.406492 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" path="/var/lib/kubelet/pods/4911f59f-f909-4620-8721-fe3ebf49fc0a/volumes" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.022869 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.024448 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.487383 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488255 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488283 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488310 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="extract-content" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488320 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="extract-content" Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488337 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="extract-utilities" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488349 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="extract-utilities" Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488365 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="extract-content" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488373 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="extract-content" Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488402 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="extract-utilities" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488432 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="extract-utilities" Nov 28 16:42:47 crc kubenswrapper[4647]: E1128 16:42:47.488467 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488475 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488717 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="a326cca6-473c-4395-b651-17624647d0da" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.488762 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="4911f59f-f909-4620-8721-fe3ebf49fc0a" containerName="registry-server" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.490873 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.503212 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.572954 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67pvp\" (UniqueName: \"kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.573124 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.573180 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.675087 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.675176 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.675370 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67pvp\" (UniqueName: \"kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.676050 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.676516 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.708759 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-67pvp\" (UniqueName: \"kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp\") pod \"community-operators-rcks7\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:47 crc kubenswrapper[4647]: I1128 16:42:47.818541 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:48 crc kubenswrapper[4647]: I1128 16:42:48.488705 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:42:49 crc kubenswrapper[4647]: I1128 16:42:49.029045 4647 generic.go:334] "Generic (PLEG): container finished" podID="963113e3-a124-41d7-bb7b-e06e135bb523" containerID="774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29" exitCode=0 Nov 28 16:42:49 crc kubenswrapper[4647]: I1128 16:42:49.029147 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerDied","Data":"774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29"} Nov 28 16:42:49 crc kubenswrapper[4647]: I1128 16:42:49.029557 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerStarted","Data":"d2be5f4247cec830a731479d79ae7645650e608af2442be89713b5bf41cbc2e3"} Nov 28 16:42:50 crc kubenswrapper[4647]: I1128 16:42:50.042593 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerStarted","Data":"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d"} Nov 28 16:42:51 crc kubenswrapper[4647]: I1128 16:42:51.054230 4647 generic.go:334] "Generic (PLEG): container finished" podID="963113e3-a124-41d7-bb7b-e06e135bb523" containerID="32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d" exitCode=0 Nov 28 16:42:51 crc kubenswrapper[4647]: I1128 16:42:51.054462 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerDied","Data":"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d"} Nov 28 16:42:52 crc kubenswrapper[4647]: I1128 16:42:52.072237 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerStarted","Data":"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40"} Nov 28 16:42:52 crc kubenswrapper[4647]: I1128 16:42:52.103255 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rcks7" podStartSLOduration=2.5703461709999997 podStartE2EDuration="5.103220979s" podCreationTimestamp="2025-11-28 16:42:47 +0000 UTC" firstStartedPulling="2025-11-28 16:42:49.032735304 +0000 UTC m=+4698.880341725" lastFinishedPulling="2025-11-28 16:42:51.565610092 +0000 UTC m=+4701.413216533" observedRunningTime="2025-11-28 16:42:52.094277392 +0000 UTC m=+4701.941883813" watchObservedRunningTime="2025-11-28 16:42:52.103220979 +0000 UTC m=+4701.950827400" Nov 28 16:42:57 crc kubenswrapper[4647]: I1128 16:42:57.820511 4647 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:57 crc kubenswrapper[4647]: I1128 16:42:57.821007 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:57 crc kubenswrapper[4647]: I1128 16:42:57.892213 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:58 crc kubenswrapper[4647]: I1128 16:42:58.204405 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:42:58 crc kubenswrapper[4647]: I1128 16:42:58.269929 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.156573 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rcks7" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="registry-server" containerID="cri-o://26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40" gracePeriod=2 Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.684079 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.790244 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67pvp\" (UniqueName: \"kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp\") pod \"963113e3-a124-41d7-bb7b-e06e135bb523\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.790395 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities\") pod \"963113e3-a124-41d7-bb7b-e06e135bb523\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.790581 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content\") pod \"963113e3-a124-41d7-bb7b-e06e135bb523\" (UID: \"963113e3-a124-41d7-bb7b-e06e135bb523\") " Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.791289 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities" (OuterVolumeSpecName: "utilities") pod "963113e3-a124-41d7-bb7b-e06e135bb523" (UID: "963113e3-a124-41d7-bb7b-e06e135bb523"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.799625 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp" (OuterVolumeSpecName: "kube-api-access-67pvp") pod "963113e3-a124-41d7-bb7b-e06e135bb523" (UID: "963113e3-a124-41d7-bb7b-e06e135bb523"). InnerVolumeSpecName "kube-api-access-67pvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.861721 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "963113e3-a124-41d7-bb7b-e06e135bb523" (UID: "963113e3-a124-41d7-bb7b-e06e135bb523"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.893180 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.893210 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67pvp\" (UniqueName: \"kubernetes.io/projected/963113e3-a124-41d7-bb7b-e06e135bb523-kube-api-access-67pvp\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:00 crc kubenswrapper[4647]: I1128 16:43:00.893234 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/963113e3-a124-41d7-bb7b-e06e135bb523-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.178266 4647 generic.go:334] "Generic (PLEG): container finished" podID="963113e3-a124-41d7-bb7b-e06e135bb523" containerID="26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40" exitCode=0 Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.178343 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcks7" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.178338 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerDied","Data":"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40"} Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.178492 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcks7" event={"ID":"963113e3-a124-41d7-bb7b-e06e135bb523","Type":"ContainerDied","Data":"d2be5f4247cec830a731479d79ae7645650e608af2442be89713b5bf41cbc2e3"} Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.178540 4647 scope.go:117] "RemoveContainer" containerID="26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.278478 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.280641 4647 scope.go:117] "RemoveContainer" containerID="32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.288328 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rcks7"] Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.306020 4647 scope.go:117] "RemoveContainer" containerID="774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.351926 4647 scope.go:117] "RemoveContainer" containerID="26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40" Nov 28 16:43:01 crc kubenswrapper[4647]: E1128 16:43:01.352675 4647 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40\": container with ID starting with 26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40 not found: ID does not exist" containerID="26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.352726 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40"} err="failed to get container status \"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40\": rpc error: code = NotFound desc = could not find container \"26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40\": container with ID starting with 26db83ca0d526a91458b639cb59fe669060eae81b377e458376c327b445b7d40 not found: ID does not exist" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.352757 4647 scope.go:117] "RemoveContainer" containerID="32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d" Nov 28 16:43:01 crc kubenswrapper[4647]: E1128 16:43:01.353188 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d\": container with ID starting with 32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d not found: ID does not exist" containerID="32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.353223 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d"} err="failed to get container status \"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d\": rpc error: code = NotFound desc = could not find container \"32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d\": container with ID starting with 32537a7b27cadfb3e250239e85b8ae292effbe62a9ff7896144e04f322aae16d not found: ID does not exist" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.353242 4647 scope.go:117] "RemoveContainer" containerID="774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29" Nov 28 16:43:01 crc kubenswrapper[4647]: E1128 16:43:01.353621 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29\": container with ID starting with 774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29 not found: ID does not exist" containerID="774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29" Nov 28 16:43:01 crc kubenswrapper[4647]: I1128 16:43:01.353684 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29"} err="failed to get container status \"774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29\": rpc error: code = NotFound desc = could not find container \"774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29\": container with ID starting with 774d3aed1f6d07c5a4ed18a4dc76b70f1c87ce23956e4d8bddd3708183a64e29 not found: ID does not exist" Nov 28 16:43:02 crc kubenswrapper[4647]: I1128 16:43:02.414144 4647 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" path="/var/lib/kubelet/pods/963113e3-a124-41d7-bb7b-e06e135bb523/volumes" Nov 28 16:43:17 crc kubenswrapper[4647]: I1128 16:43:17.022748 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:43:17 crc kubenswrapper[4647]: I1128 16:43:17.023577 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.022345 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.052115 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.052637 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.053717 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.053798 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b" gracePeriod=600 Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.655114 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b" exitCode=0 Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.655181 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b"} Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.655494 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" 
event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"} Nov 28 16:43:47 crc kubenswrapper[4647]: I1128 16:43:47.655518 4647 scope.go:117] "RemoveContainer" containerID="6084e157fec35eb6e5e673c9dbee1a1d7e6bbfb760c9a6fe65ff1f59b0adac83" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.178242 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb"] Nov 28 16:45:00 crc kubenswrapper[4647]: E1128 16:45:00.179339 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="extract-utilities" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.179358 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="extract-utilities" Nov 28 16:45:00 crc kubenswrapper[4647]: E1128 16:45:00.179372 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="extract-content" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.179381 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="extract-content" Nov 28 16:45:00 crc kubenswrapper[4647]: E1128 16:45:00.179442 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.179452 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.179736 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="963113e3-a124-41d7-bb7b-e06e135bb523" containerName="registry-server" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.182402 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.184835 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.196983 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.207917 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb"] Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.297899 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.298021 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.298053 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdxr7\" (UniqueName: \"kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.399625 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.399689 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.399711 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdxr7\" (UniqueName: \"kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.401453 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume\") pod 
\"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.419078 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdxr7\" (UniqueName: \"kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.442119 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume\") pod \"collect-profiles-29405805-c97hb\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:00 crc kubenswrapper[4647]: I1128 16:45:00.528823 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:01 crc kubenswrapper[4647]: I1128 16:45:01.022063 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb"] Nov 28 16:45:01 crc kubenswrapper[4647]: I1128 16:45:01.445617 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" event={"ID":"ced96247-ec51-49cc-8155-adafde178de1","Type":"ContainerStarted","Data":"d5a833e0ded09035203205582b410c1b0725f3078bb0a90f33798e2f0f3eade0"} Nov 28 16:45:01 crc kubenswrapper[4647]: I1128 16:45:01.445657 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" event={"ID":"ced96247-ec51-49cc-8155-adafde178de1","Type":"ContainerStarted","Data":"30cc680840dcf548dac665eb5697cd38c88b90947d790f3c53cd829c6865f0df"} Nov 28 16:45:01 crc kubenswrapper[4647]: I1128 16:45:01.466067 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" podStartSLOduration=1.466041381 podStartE2EDuration="1.466041381s" podCreationTimestamp="2025-11-28 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:45:01.462914478 +0000 UTC m=+4831.310520899" watchObservedRunningTime="2025-11-28 16:45:01.466041381 +0000 UTC m=+4831.313647802" Nov 28 16:45:02 crc kubenswrapper[4647]: I1128 16:45:02.459017 4647 generic.go:334] "Generic (PLEG): container finished" podID="ced96247-ec51-49cc-8155-adafde178de1" containerID="d5a833e0ded09035203205582b410c1b0725f3078bb0a90f33798e2f0f3eade0" exitCode=0 Nov 28 16:45:02 crc kubenswrapper[4647]: I1128 16:45:02.459062 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" event={"ID":"ced96247-ec51-49cc-8155-adafde178de1","Type":"ContainerDied","Data":"d5a833e0ded09035203205582b410c1b0725f3078bb0a90f33798e2f0f3eade0"} Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.893803 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.979038 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume\") pod \"ced96247-ec51-49cc-8155-adafde178de1\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.979229 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume\") pod \"ced96247-ec51-49cc-8155-adafde178de1\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.979281 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdxr7\" (UniqueName: \"kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7\") pod \"ced96247-ec51-49cc-8155-adafde178de1\" (UID: \"ced96247-ec51-49cc-8155-adafde178de1\") " Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.980211 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume" (OuterVolumeSpecName: "config-volume") pod "ced96247-ec51-49cc-8155-adafde178de1" (UID: "ced96247-ec51-49cc-8155-adafde178de1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.988488 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ced96247-ec51-49cc-8155-adafde178de1" (UID: "ced96247-ec51-49cc-8155-adafde178de1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:03 crc kubenswrapper[4647]: I1128 16:45:03.988765 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7" (OuterVolumeSpecName: "kube-api-access-bdxr7") pod "ced96247-ec51-49cc-8155-adafde178de1" (UID: "ced96247-ec51-49cc-8155-adafde178de1"). InnerVolumeSpecName "kube-api-access-bdxr7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.081950 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ced96247-ec51-49cc-8155-adafde178de1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.082013 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdxr7\" (UniqueName: \"kubernetes.io/projected/ced96247-ec51-49cc-8155-adafde178de1-kube-api-access-bdxr7\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.082030 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ced96247-ec51-49cc-8155-adafde178de1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.480651 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" event={"ID":"ced96247-ec51-49cc-8155-adafde178de1","Type":"ContainerDied","Data":"30cc680840dcf548dac665eb5697cd38c88b90947d790f3c53cd829c6865f0df"} Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.480699 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30cc680840dcf548dac665eb5697cd38c88b90947d790f3c53cd829c6865f0df" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.481167 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-c97hb" Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.575063 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf"] Nov 28 16:45:04 crc kubenswrapper[4647]: I1128 16:45:04.586437 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-jtxxf"] Nov 28 16:45:06 crc kubenswrapper[4647]: I1128 16:45:06.413361 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75c74919-4a74-44fb-983f-1d4780061e77" path="/var/lib/kubelet/pods/75c74919-4a74-44fb-983f-1d4780061e77/volumes" Nov 28 16:45:08 crc kubenswrapper[4647]: I1128 16:45:08.524174 4647 generic.go:334] "Generic (PLEG): container finished" podID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" containerID="9a8bd37e5bda990b139a6b2ef4d7d5a18bb0a95c41b34613899a5e35a2f3a822" exitCode=0 Nov 28 16:45:08 crc kubenswrapper[4647]: I1128 16:45:08.524613 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"5f2af68b-575d-469b-ab8d-7f16dfadc0d7","Type":"ContainerDied","Data":"9a8bd37e5bda990b139a6b2ef4d7d5a18bb0a95c41b34613899a5e35a2f3a822"} Nov 28 16:45:09 crc kubenswrapper[4647]: I1128 16:45:09.895252 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.010460 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.010585 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.010607 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.010916 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.011070 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.011248 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.011333 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.011401 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.011513 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6c9x\" (UniqueName: \"kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x\") pod \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\" (UID: \"5f2af68b-575d-469b-ab8d-7f16dfadc0d7\") " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.015155 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.016057 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data" (OuterVolumeSpecName: "config-data") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.019956 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.036380 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.041395 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x" (OuterVolumeSpecName: "kube-api-access-r6c9x") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "kube-api-access-r6c9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.051759 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.055575 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.056338 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "ca-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.099620 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "5f2af68b-575d-469b-ab8d-7f16dfadc0d7" (UID: "5f2af68b-575d-469b-ab8d-7f16dfadc0d7"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114426 4647 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114574 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114665 4647 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114742 4647 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114819 4647 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.114916 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6c9x\" (UniqueName: \"kubernetes.io/projected/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-kube-api-access-r6c9x\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.115011 4647 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.115106 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f2af68b-575d-469b-ab8d-7f16dfadc0d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.115481 4647 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.137839 4647 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.217627 4647 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.557582 4647 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/tempest-tests-tempest" event={"ID":"5f2af68b-575d-469b-ab8d-7f16dfadc0d7","Type":"ContainerDied","Data":"22e3ec1363b67b592fa2e6f2b9f7fa12338782a2b966b8cc6d7e33a058b5692b"} Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.558091 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22e3ec1363b67b592fa2e6f2b9f7fa12338782a2b966b8cc6d7e33a058b5692b" Nov 28 16:45:10 crc kubenswrapper[4647]: I1128 16:45:10.557828 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.197678 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 28 16:45:15 crc kubenswrapper[4647]: E1128 16:45:15.198893 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ced96247-ec51-49cc-8155-adafde178de1" containerName="collect-profiles" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.198911 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ced96247-ec51-49cc-8155-adafde178de1" containerName="collect-profiles" Nov 28 16:45:15 crc kubenswrapper[4647]: E1128 16:45:15.198971 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" containerName="tempest-tests-tempest-tests-runner" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.198983 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" containerName="tempest-tests-tempest-tests-runner" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.199202 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ced96247-ec51-49cc-8155-adafde178de1" containerName="collect-profiles" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.199231 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f2af68b-575d-469b-ab8d-7f16dfadc0d7" containerName="tempest-tests-tempest-tests-runner" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.200066 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.202796 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2kkz8" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.213726 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.329251 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmjpp\" (UniqueName: \"kubernetes.io/projected/ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99-kube-api-access-zmjpp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.329452 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.431017 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.431225 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmjpp\" (UniqueName: \"kubernetes.io/projected/ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99-kube-api-access-zmjpp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.432025 4647 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.460874 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmjpp\" (UniqueName: \"kubernetes.io/projected/ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99-kube-api-access-zmjpp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.463553 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc 
kubenswrapper[4647]: I1128 16:45:15.524441 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 28 16:45:15 crc kubenswrapper[4647]: I1128 16:45:15.988261 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 28 16:45:16 crc kubenswrapper[4647]: I1128 16:45:16.618386 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99","Type":"ContainerStarted","Data":"e206337e800924485408efde042a143ab5cedf5ac6a130f59b5eada331a6ebe0"} Nov 28 16:45:17 crc kubenswrapper[4647]: I1128 16:45:17.629527 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99","Type":"ContainerStarted","Data":"b78366c5ad67a4492280f151e26234a44a6317155139acac4b0f56ce91df7059"} Nov 28 16:45:17 crc kubenswrapper[4647]: I1128 16:45:17.655203 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.6434501510000001 podStartE2EDuration="2.655183652s" podCreationTimestamp="2025-11-28 16:45:15 +0000 UTC" firstStartedPulling="2025-11-28 16:45:15.986863948 +0000 UTC m=+4845.834470369" lastFinishedPulling="2025-11-28 16:45:16.998597449 +0000 UTC m=+4846.846203870" observedRunningTime="2025-11-28 16:45:17.652180023 +0000 UTC m=+4847.499786454" watchObservedRunningTime="2025-11-28 16:45:17.655183652 +0000 UTC m=+4847.502790073" Nov 28 16:45:19 crc kubenswrapper[4647]: I1128 16:45:19.833558 4647 scope.go:117] "RemoveContainer" containerID="3fcd2f6dec339d4e20cecee8182a3d050db325cc6fcec8a9ac81326d11b96fec" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.692176 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xwzm8/must-gather-hz5rs"] Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.694199 4647 util.go:30] "No sandbox for pod can be found. 
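The pod_startup_latency_tracker lines above encode a small calculation: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image pull window (lastFinishedPulling minus firstStartedPulling) from it. A sketch reproducing the numbers from the test-operator-logs entry:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Timestamps copied from the log entry above.
	created := parse("2025-11-28 16:45:15 +0000 UTC")
	firstPull := parse("2025-11-28 16:45:15.986863948 +0000 UTC")
	lastPull := parse("2025-11-28 16:45:16.998597449 +0000 UTC")
	running := parse("2025-11-28 16:45:17.655183652 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull) // startup time excluding image pulls
	fmt.Println("podStartE2EDuration:", e2e) // 2.655183652s
	fmt.Println("podStartSLOduration:", slo) // 1.643450151s
}

Both printed values match the logged figures. In the earlier collect-profiles entry the pull timestamps are the zero time (0001-01-01), so no pull window is subtracted and the SLO and E2E durations coincide at 1.466041381s.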
Need to start a new one" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.699189 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xwzm8"/"kube-root-ca.crt" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.699561 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-xwzm8"/"default-dockercfg-rkzz5" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.699732 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-xwzm8"/"openshift-service-ca.crt" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.720815 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xwzm8/must-gather-hz5rs"] Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.790000 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.790184 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6c5h\" (UniqueName: \"kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.892718 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6c5h\" (UniqueName: \"kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.892879 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.893265 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:42 crc kubenswrapper[4647]: I1128 16:45:42.911910 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6c5h\" (UniqueName: \"kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h\") pod \"must-gather-hz5rs\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") " pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:43 crc kubenswrapper[4647]: I1128 16:45:43.014153 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" Nov 28 16:45:43 crc kubenswrapper[4647]: I1128 16:45:43.770123 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-xwzm8/must-gather-hz5rs"] Nov 28 16:45:43 crc kubenswrapper[4647]: I1128 16:45:43.771199 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:45:43 crc kubenswrapper[4647]: I1128 16:45:43.970045 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" event={"ID":"b63976eb-8fb0-4a44-92ab-f4dda621eea1","Type":"ContainerStarted","Data":"d875120b2150933de5a0c4838c60b99fe1f3cef8cd39dc0f77eb6bd8da15f4b8"} Nov 28 16:45:47 crc kubenswrapper[4647]: I1128 16:45:47.023174 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:45:47 crc kubenswrapper[4647]: I1128 16:45:47.023824 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:45:52 crc kubenswrapper[4647]: I1128 16:45:52.054948 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" event={"ID":"b63976eb-8fb0-4a44-92ab-f4dda621eea1","Type":"ContainerStarted","Data":"3f0867b0a0cb2359e119f47b275738f624943e43b573db2b3dbea1515951b3fe"} Nov 28 16:45:52 crc kubenswrapper[4647]: I1128 16:45:52.055390 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" event={"ID":"b63976eb-8fb0-4a44-92ab-f4dda621eea1","Type":"ContainerStarted","Data":"d3c995fb82186749407a94b168452e61f2012123d096570740e72a185c2b60b5"} Nov 28 16:45:52 crc kubenswrapper[4647]: I1128 16:45:52.073025 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" podStartSLOduration=3.230839223 podStartE2EDuration="10.073002982s" podCreationTimestamp="2025-11-28 16:45:42 +0000 UTC" firstStartedPulling="2025-11-28 16:45:43.770971891 +0000 UTC m=+4873.618578312" lastFinishedPulling="2025-11-28 16:45:50.61313565 +0000 UTC m=+4880.460742071" observedRunningTime="2025-11-28 16:45:52.071488532 +0000 UTC m=+4881.919094953" watchObservedRunningTime="2025-11-28 16:45:52.073002982 +0000 UTC m=+4881.920609413" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.335301 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-xlmq4"] Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.337221 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.493883 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.493971 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5226\" (UniqueName: \"kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.597997 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.598069 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5226\" (UniqueName: \"kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.598135 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.623025 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5226\" (UniqueName: \"kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226\") pod \"crc-debug-xlmq4\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: I1128 16:45:58.656167 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:45:58 crc kubenswrapper[4647]: W1128 16:45:58.887611 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13f85783_05b7_40cd_b5f0_fe6af9ca0dfa.slice/crio-63e70e2de0309897273b27a70f13e3d4624f2deef318d41a23ee02fa102be642 WatchSource:0}: Error finding container 63e70e2de0309897273b27a70f13e3d4624f2deef318d41a23ee02fa102be642: Status 404 returned error can't find the container with id 63e70e2de0309897273b27a70f13e3d4624f2deef318d41a23ee02fa102be642 Nov 28 16:45:59 crc kubenswrapper[4647]: I1128 16:45:59.119882 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" event={"ID":"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa","Type":"ContainerStarted","Data":"63e70e2de0309897273b27a70f13e3d4624f2deef318d41a23ee02fa102be642"} Nov 28 16:46:12 crc kubenswrapper[4647]: I1128 16:46:12.289483 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" event={"ID":"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa","Type":"ContainerStarted","Data":"43bbca3ea040b27baf1fd91ee7ccdf9da4169a716a35683b7690cfff10ecb75c"} Nov 28 16:46:12 crc kubenswrapper[4647]: I1128 16:46:12.313793 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" podStartSLOduration=1.8116056010000001 podStartE2EDuration="14.313771837s" podCreationTimestamp="2025-11-28 16:45:58 +0000 UTC" firstStartedPulling="2025-11-28 16:45:58.890842226 +0000 UTC m=+4888.738448647" lastFinishedPulling="2025-11-28 16:46:11.393008462 +0000 UTC m=+4901.240614883" observedRunningTime="2025-11-28 16:46:12.310156671 +0000 UTC m=+4902.157763092" watchObservedRunningTime="2025-11-28 16:46:12.313771837 +0000 UTC m=+4902.161378258" Nov 28 16:46:17 crc kubenswrapper[4647]: I1128 16:46:17.022517 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:46:17 crc kubenswrapper[4647]: I1128 16:46:17.023124 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.022768 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.023232 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.023274 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.023954 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.024003 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" gracePeriod=600 Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.622541 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" exitCode=0 Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.622586 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"} Nov 28 16:46:47 crc kubenswrapper[4647]: I1128 16:46:47.622622 4647 scope.go:117] "RemoveContainer" containerID="4b3bdace43b3abfee088ee84cf4e15cd15d4145f6575c446ae8e0bf78daa491b" Nov 28 16:46:47 crc kubenswrapper[4647]: E1128 16:46:47.772006 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:46:48 crc kubenswrapper[4647]: I1128 16:46:48.643973 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:46:48 crc kubenswrapper[4647]: E1128 16:46:48.644213 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:47:02 crc kubenswrapper[4647]: I1128 16:47:02.762323 4647 generic.go:334] "Generic (PLEG): container finished" podID="13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" containerID="43bbca3ea040b27baf1fd91ee7ccdf9da4169a716a35683b7690cfff10ecb75c" exitCode=0 Nov 28 16:47:02 crc kubenswrapper[4647]: I1128 16:47:02.762432 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" event={"ID":"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa","Type":"ContainerDied","Data":"43bbca3ea040b27baf1fd91ee7ccdf9da4169a716a35683b7690cfff10ecb75c"} Nov 28 16:47:03 crc kubenswrapper[4647]: I1128 16:47:03.394195 4647 scope.go:117] "RemoveContainer" 
containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:47:03 crc kubenswrapper[4647]: E1128 16:47:03.394623 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:47:03 crc kubenswrapper[4647]: I1128 16:47:03.865261 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:47:03 crc kubenswrapper[4647]: I1128 16:47:03.922165 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-xlmq4"] Nov 28 16:47:03 crc kubenswrapper[4647]: I1128 16:47:03.929994 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-xlmq4"] Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.009100 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5226\" (UniqueName: \"kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226\") pod \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.009227 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host\") pod \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\" (UID: \"13f85783-05b7-40cd-b5f0-fe6af9ca0dfa\") " Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.009601 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host" (OuterVolumeSpecName: "host") pod "13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" (UID: "13f85783-05b7-40cd-b5f0-fe6af9ca0dfa"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.014268 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226" (OuterVolumeSpecName: "kube-api-access-h5226") pod "13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" (UID: "13f85783-05b7-40cd-b5f0-fe6af9ca0dfa"). InnerVolumeSpecName "kube-api-access-h5226". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.111101 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5226\" (UniqueName: \"kubernetes.io/projected/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-kube-api-access-h5226\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.111142 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.414170 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" path="/var/lib/kubelet/pods/13f85783-05b7-40cd-b5f0-fe6af9ca0dfa/volumes" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.783216 4647 scope.go:117] "RemoveContainer" containerID="43bbca3ea040b27baf1fd91ee7ccdf9da4169a716a35683b7690cfff10ecb75c" Nov 28 16:47:04 crc kubenswrapper[4647]: I1128 16:47:04.783484 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-xlmq4" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.161947 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-ds8wv"] Nov 28 16:47:05 crc kubenswrapper[4647]: E1128 16:47:05.162633 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" containerName="container-00" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.162651 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" containerName="container-00" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.162931 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="13f85783-05b7-40cd-b5f0-fe6af9ca0dfa" containerName="container-00" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.163584 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.238038 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfr68\" (UniqueName: \"kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.238114 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.340908 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfr68\" (UniqueName: \"kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.341017 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.341276 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.369044 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfr68\" (UniqueName: \"kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68\") pod \"crc-debug-ds8wv\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.494098 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:05 crc kubenswrapper[4647]: I1128 16:47:05.796739 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" event={"ID":"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d","Type":"ContainerStarted","Data":"66c009cb5e328852c392857c0b74f4c05d94906abdc1200a67d21e34d4e997d0"} Nov 28 16:47:06 crc kubenswrapper[4647]: I1128 16:47:06.813740 4647 generic.go:334] "Generic (PLEG): container finished" podID="cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" containerID="8cbd86030c206ddc56e6ee834bd12bcea2a48b0bb2b3d6a2d39a1eeb609936b3" exitCode=0 Nov 28 16:47:06 crc kubenswrapper[4647]: I1128 16:47:06.814325 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" event={"ID":"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d","Type":"ContainerDied","Data":"8cbd86030c206ddc56e6ee834bd12bcea2a48b0bb2b3d6a2d39a1eeb609936b3"} Nov 28 16:47:07 crc kubenswrapper[4647]: I1128 16:47:07.926993 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:07 crc kubenswrapper[4647]: I1128 16:47:07.999666 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfr68\" (UniqueName: \"kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68\") pod \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " Nov 28 16:47:07 crc kubenswrapper[4647]: I1128 16:47:07.999759 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host\") pod \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\" (UID: \"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d\") " Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.000057 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host" (OuterVolumeSpecName: "host") pod "cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" (UID: "cf44a33f-161f-4b3e-91e5-a1bafb2cde2d"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.000205 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.006898 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68" (OuterVolumeSpecName: "kube-api-access-kfr68") pod "cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" (UID: "cf44a33f-161f-4b3e-91e5-a1bafb2cde2d"). InnerVolumeSpecName "kube-api-access-kfr68". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.101424 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfr68\" (UniqueName: \"kubernetes.io/projected/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d-kube-api-access-kfr68\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.852715 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" event={"ID":"cf44a33f-161f-4b3e-91e5-a1bafb2cde2d","Type":"ContainerDied","Data":"66c009cb5e328852c392857c0b74f4c05d94906abdc1200a67d21e34d4e997d0"} Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.852967 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66c009cb5e328852c392857c0b74f4c05d94906abdc1200a67d21e34d4e997d0" Nov 28 16:47:08 crc kubenswrapper[4647]: I1128 16:47:08.853025 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-ds8wv" Nov 28 16:47:09 crc kubenswrapper[4647]: I1128 16:47:09.062728 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-ds8wv"] Nov 28 16:47:09 crc kubenswrapper[4647]: I1128 16:47:09.072642 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-ds8wv"] Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.407592 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" path="/var/lib/kubelet/pods/cf44a33f-161f-4b3e-91e5-a1bafb2cde2d/volumes" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.571746 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-lm6gt"] Nov 28 16:47:10 crc kubenswrapper[4647]: E1128 16:47:10.572610 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" containerName="container-00" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.572693 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" containerName="container-00" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.572957 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf44a33f-161f-4b3e-91e5-a1bafb2cde2d" containerName="container-00" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.573650 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.684608 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.684808 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f47nv\" (UniqueName: \"kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.786702 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.786885 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f47nv\" (UniqueName: \"kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:10 crc kubenswrapper[4647]: I1128 16:47:10.786944 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.101794 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f47nv\" (UniqueName: \"kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv\") pod \"crc-debug-lm6gt\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.192478 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.880116 4647 generic.go:334] "Generic (PLEG): container finished" podID="ee6269ee-a3c1-495b-b29c-7c60e288eb3e" containerID="f79567c56b6a06d2991d4e8bb0c231a97a28a8d08e0a3edbbfa79f30ea2ecc44" exitCode=0 Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.880160 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" event={"ID":"ee6269ee-a3c1-495b-b29c-7c60e288eb3e","Type":"ContainerDied","Data":"f79567c56b6a06d2991d4e8bb0c231a97a28a8d08e0a3edbbfa79f30ea2ecc44"} Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.880187 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" event={"ID":"ee6269ee-a3c1-495b-b29c-7c60e288eb3e","Type":"ContainerStarted","Data":"5a2f11ff81c869ef96c600100ba0007ad95870943d9bea721bb016493ea9ccbf"} Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.918904 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-lm6gt"] Nov 28 16:47:11 crc kubenswrapper[4647]: I1128 16:47:11.928889 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xwzm8/crc-debug-lm6gt"] Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.202103 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:12 crc kubenswrapper[4647]: E1128 16:47:12.202924 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee6269ee-a3c1-495b-b29c-7c60e288eb3e" containerName="container-00" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.203041 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee6269ee-a3c1-495b-b29c-7c60e288eb3e" containerName="container-00" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.203338 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee6269ee-a3c1-495b-b29c-7c60e288eb3e" containerName="container-00" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.205112 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.211336 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.317780 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.317854 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.317895 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs7wp\" (UniqueName: \"kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.420181 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.420253 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs7wp\" (UniqueName: \"kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.420539 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.420863 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.420941 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.452796 4647 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fs7wp\" (UniqueName: \"kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp\") pod \"certified-operators-mntdl\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.524051 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:12 crc kubenswrapper[4647]: I1128 16:47:12.986997 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.106075 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.133239 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host\") pod \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.133625 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f47nv\" (UniqueName: \"kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv\") pod \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\" (UID: \"ee6269ee-a3c1-495b-b29c-7c60e288eb3e\") " Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.133309 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host" (OuterVolumeSpecName: "host") pod "ee6269ee-a3c1-495b-b29c-7c60e288eb3e" (UID: "ee6269ee-a3c1-495b-b29c-7c60e288eb3e"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.134393 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.142693 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv" (OuterVolumeSpecName: "kube-api-access-f47nv") pod "ee6269ee-a3c1-495b-b29c-7c60e288eb3e" (UID: "ee6269ee-a3c1-495b-b29c-7c60e288eb3e"). InnerVolumeSpecName "kube-api-access-f47nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.236442 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f47nv\" (UniqueName: \"kubernetes.io/projected/ee6269ee-a3c1-495b-b29c-7c60e288eb3e-kube-api-access-f47nv\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.899050 4647 scope.go:117] "RemoveContainer" containerID="f79567c56b6a06d2991d4e8bb0c231a97a28a8d08e0a3edbbfa79f30ea2ecc44" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.899369 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-xwzm8/crc-debug-lm6gt" Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.905190 4647 generic.go:334] "Generic (PLEG): container finished" podID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerID="a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a" exitCode=0 Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.905229 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerDied","Data":"a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a"} Nov 28 16:47:13 crc kubenswrapper[4647]: I1128 16:47:13.905255 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerStarted","Data":"ee9c411b47521ea10f1d1fbae4593364ddd6cb6f0e7f8c2bc12af15042553109"} Nov 28 16:47:14 crc kubenswrapper[4647]: I1128 16:47:14.408660 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee6269ee-a3c1-495b-b29c-7c60e288eb3e" path="/var/lib/kubelet/pods/ee6269ee-a3c1-495b-b29c-7c60e288eb3e/volumes" Nov 28 16:47:14 crc kubenswrapper[4647]: I1128 16:47:14.916620 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerStarted","Data":"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb"} Nov 28 16:47:15 crc kubenswrapper[4647]: I1128 16:47:15.394147 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:47:15 crc kubenswrapper[4647]: E1128 16:47:15.394694 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:47:16 crc kubenswrapper[4647]: I1128 16:47:16.940783 4647 generic.go:334] "Generic (PLEG): container finished" podID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerID="ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb" exitCode=0 Nov 28 16:47:16 crc kubenswrapper[4647]: I1128 16:47:16.940829 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerDied","Data":"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb"} Nov 28 16:47:17 crc kubenswrapper[4647]: I1128 16:47:17.951491 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerStarted","Data":"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455"} Nov 28 16:47:17 crc kubenswrapper[4647]: I1128 16:47:17.976089 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mntdl" podStartSLOduration=2.238150785 podStartE2EDuration="5.976069263s" podCreationTimestamp="2025-11-28 16:47:12 +0000 UTC" firstStartedPulling="2025-11-28 16:47:13.907652764 +0000 UTC m=+4963.755259185" lastFinishedPulling="2025-11-28 
16:47:17.645571222 +0000 UTC m=+4967.493177663" observedRunningTime="2025-11-28 16:47:17.971195764 +0000 UTC m=+4967.818802185" watchObservedRunningTime="2025-11-28 16:47:17.976069263 +0000 UTC m=+4967.823675684" Nov 28 16:47:22 crc kubenswrapper[4647]: I1128 16:47:22.525052 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:22 crc kubenswrapper[4647]: I1128 16:47:22.525502 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:22 crc kubenswrapper[4647]: I1128 16:47:22.577656 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:23 crc kubenswrapper[4647]: I1128 16:47:23.043698 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:24 crc kubenswrapper[4647]: I1128 16:47:24.578442 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.013679 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mntdl" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="registry-server" containerID="cri-o://e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455" gracePeriod=2 Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.502594 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.585591 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities\") pod \"760cd115-15dd-4a88-b745-08a07b9fce9f\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.585670 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fs7wp\" (UniqueName: \"kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp\") pod \"760cd115-15dd-4a88-b745-08a07b9fce9f\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.585766 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content\") pod \"760cd115-15dd-4a88-b745-08a07b9fce9f\" (UID: \"760cd115-15dd-4a88-b745-08a07b9fce9f\") " Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.586576 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities" (OuterVolumeSpecName: "utilities") pod "760cd115-15dd-4a88-b745-08a07b9fce9f" (UID: "760cd115-15dd-4a88-b745-08a07b9fce9f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.592685 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp" (OuterVolumeSpecName: "kube-api-access-fs7wp") pod "760cd115-15dd-4a88-b745-08a07b9fce9f" (UID: "760cd115-15dd-4a88-b745-08a07b9fce9f"). InnerVolumeSpecName "kube-api-access-fs7wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.649029 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "760cd115-15dd-4a88-b745-08a07b9fce9f" (UID: "760cd115-15dd-4a88-b745-08a07b9fce9f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.688337 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.688366 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fs7wp\" (UniqueName: \"kubernetes.io/projected/760cd115-15dd-4a88-b745-08a07b9fce9f-kube-api-access-fs7wp\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:25 crc kubenswrapper[4647]: I1128 16:47:25.688377 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/760cd115-15dd-4a88-b745-08a07b9fce9f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.024615 4647 generic.go:334] "Generic (PLEG): container finished" podID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerID="e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455" exitCode=0 Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.024640 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerDied","Data":"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455"} Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.024683 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mntdl" event={"ID":"760cd115-15dd-4a88-b745-08a07b9fce9f","Type":"ContainerDied","Data":"ee9c411b47521ea10f1d1fbae4593364ddd6cb6f0e7f8c2bc12af15042553109"} Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.024704 4647 scope.go:117] "RemoveContainer" containerID="e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.024709 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mntdl" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.047613 4647 scope.go:117] "RemoveContainer" containerID="ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.071639 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.079212 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mntdl"] Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.089234 4647 scope.go:117] "RemoveContainer" containerID="a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.146374 4647 scope.go:117] "RemoveContainer" containerID="e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455" Nov 28 16:47:26 crc kubenswrapper[4647]: E1128 16:47:26.147054 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455\": container with ID starting with e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455 not found: ID does not exist" containerID="e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.147097 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455"} err="failed to get container status \"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455\": rpc error: code = NotFound desc = could not find container \"e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455\": container with ID starting with e2918a349775fb93056e04c12695f18d752a6e6b49c4bb1af8a8d9c81ccf9455 not found: ID does not exist" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.147126 4647 scope.go:117] "RemoveContainer" containerID="ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb" Nov 28 16:47:26 crc kubenswrapper[4647]: E1128 16:47:26.147436 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb\": container with ID starting with ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb not found: ID does not exist" containerID="ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.147477 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb"} err="failed to get container status \"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb\": rpc error: code = NotFound desc = could not find container \"ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb\": container with ID starting with ebe6609e6357dc8eed3f30079a437a60462f45c8fd0a8740e16eb2c234d788eb not found: ID does not exist" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.147506 4647 scope.go:117] "RemoveContainer" containerID="a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a" Nov 28 16:47:26 crc kubenswrapper[4647]: E1128 16:47:26.147917 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a\": container with ID starting with a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a not found: ID does not exist" containerID="a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.147946 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a"} err="failed to get container status \"a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a\": rpc error: code = NotFound desc = could not find container \"a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a\": container with ID starting with a2f9c0ec2870a42b7416bf9cfeeec3835d6467bec1dd33d0eaa2be53aa2baf3a not found: ID does not exist" Nov 28 16:47:26 crc kubenswrapper[4647]: I1128 16:47:26.407312 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" path="/var/lib/kubelet/pods/760cd115-15dd-4a88-b745-08a07b9fce9f/volumes" Nov 28 16:47:30 crc kubenswrapper[4647]: I1128 16:47:30.411654 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:47:30 crc kubenswrapper[4647]: E1128 16:47:30.412927 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:47:33 crc kubenswrapper[4647]: I1128 16:47:33.496180 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-8f8ccd5d4-thgh2_cad9ee6e-4bee-49c6-9f24-7c97e6e745ed/barbican-api/0.log" Nov 28 16:47:33 crc kubenswrapper[4647]: I1128 16:47:33.708175 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-8f8ccd5d4-thgh2_cad9ee6e-4bee-49c6-9f24-7c97e6e745ed/barbican-api-log/0.log" Nov 28 16:47:33 crc kubenswrapper[4647]: I1128 16:47:33.780236 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68d4467b78-mhh9d_a70c88e0-3df8-484f-8343-2bf87f6c9f33/barbican-keystone-listener/0.log" Nov 28 16:47:33 crc kubenswrapper[4647]: I1128 16:47:33.865293 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68d4467b78-mhh9d_a70c88e0-3df8-484f-8343-2bf87f6c9f33/barbican-keystone-listener-log/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.062487 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9c6c77dd7-h6hcl_52ae86fe-ca94-41f0-880a-d957edd96160/barbican-worker/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.068493 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9c6c77dd7-h6hcl_52ae86fe-ca94-41f0-880a-d957edd96160/barbican-worker-log/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.383973 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s_60d6a4cd-44ae-46ff-a980-a81ddab3b98c/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.402577 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/ceilometer-central-agent/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.541379 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/proxy-httpd/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.542868 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/ceilometer-notification-agent/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.594459 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/sg-core/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.800751 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0f64718e-70bf-4d38-8c02-0523053f5e99/cinder-api-log/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.854467 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0f64718e-70bf-4d38-8c02-0523053f5e99/cinder-api/0.log" Nov 28 16:47:34 crc kubenswrapper[4647]: I1128 16:47:34.981324 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_a6eb59e0-a2aa-49d0-a662-8e37f51004ef/cinder-scheduler/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.202899 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_a6eb59e0-a2aa-49d0-a662-8e37f51004ef/probe/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.227460 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-flpg5_77715da8-feee-451f-a972-a2e52884582a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.359077 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb_a8d3c439-b6ee-42bd-96d2-eb725c996b97/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.553866 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/init/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.740711 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/init/0.log" Nov 28 16:47:35 crc kubenswrapper[4647]: I1128 16:47:35.875738 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/dnsmasq-dns/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.148355 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z_aa17a444-8971-4590-a7a5-9d303c00b90e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.323236 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-external-api-0_cf5f0db0-688a-43f4-b38e-8478858003fa/glance-httpd/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.353641 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_cf5f0db0-688a-43f4-b38e-8478858003fa/glance-log/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.609376 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38dcdbad-1599-4387-8587-6676317adbc3/glance-log/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.696872 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38dcdbad-1599-4387-8587-6676317adbc3/glance-httpd/0.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.799602 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon/2.log" Nov 28 16:47:36 crc kubenswrapper[4647]: I1128 16:47:36.946432 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon/1.log" Nov 28 16:47:37 crc kubenswrapper[4647]: I1128 16:47:37.163043 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-d78nq_4cb1ba80-4e50-4753-9887-3e420c825d2a/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:37 crc kubenswrapper[4647]: I1128 16:47:37.387058 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-nklbp_cbde4c89-6c68-4422-931b-94507dc5376d/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:37 crc kubenswrapper[4647]: I1128 16:47:37.453269 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon-log/0.log" Nov 28 16:47:37 crc kubenswrapper[4647]: I1128 16:47:37.700019 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405761-4rc8f_03bc8de1-2028-4b26-bf81-c51d09cf6a71/keystone-cron/0.log" Nov 28 16:47:37 crc kubenswrapper[4647]: I1128 16:47:37.993235 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_bbd46dc1-09cb-44e2-8150-a1f512a3efc9/kube-state-metrics/0.log" Nov 28 16:47:38 crc kubenswrapper[4647]: I1128 16:47:38.087751 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-8567d4b5-5ss7w_317b992c-8c2d-4838-bfbf-6debefd73d0a/keystone-api/0.log" Nov 28 16:47:38 crc kubenswrapper[4647]: I1128 16:47:38.225995 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5_93e3bb5f-ef6f-44de-9f2c-aa13871df572/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:38 crc kubenswrapper[4647]: I1128 16:47:38.741386 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc_1180d1cb-f9bc-4646-864d-0bdea17fd99f/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:38 crc kubenswrapper[4647]: I1128 16:47:38.958689 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-76d7749889-z87rt_8e65fe60-2d61-4066-aed7-6e211c8f2096/neutron-httpd/0.log" Nov 28 16:47:39 crc kubenswrapper[4647]: I1128 16:47:39.259803 4647 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_neutron-76d7749889-z87rt_8e65fe60-2d61-4066-aed7-6e211c8f2096/neutron-api/0.log" Nov 28 16:47:39 crc kubenswrapper[4647]: I1128 16:47:39.825433 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_29b2bc60-27b4-48ee-b7d5-39a9c9648c03/nova-cell0-conductor-conductor/0.log" Nov 28 16:47:40 crc kubenswrapper[4647]: I1128 16:47:40.316704 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_a641c2b8-f7d8-4829-8d49-d8eff2e2d132/nova-cell1-conductor-conductor/0.log" Nov 28 16:47:40 crc kubenswrapper[4647]: I1128 16:47:40.585021 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ee715495-598f-4c76-9399-92846d682bbe/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 16:47:40 crc kubenswrapper[4647]: I1128 16:47:40.761110 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4d763768-9218-4866-94d2-1197f5e81fce/nova-api-api/0.log" Nov 28 16:47:40 crc kubenswrapper[4647]: I1128 16:47:40.797473 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4d763768-9218-4866-94d2-1197f5e81fce/nova-api-log/0.log" Nov 28 16:47:41 crc kubenswrapper[4647]: I1128 16:47:41.210749 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-p69xl_338f9128-79ea-4cda-b4e8-7664e6057225/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:41 crc kubenswrapper[4647]: I1128 16:47:41.386993 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f/nova-metadata-log/0.log" Nov 28 16:47:41 crc kubenswrapper[4647]: I1128 16:47:41.846240 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/mysql-bootstrap/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.032472 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7597bf4e-3cd6-4adb-8723-8a86aaf60a05/nova-scheduler-scheduler/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.167104 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/galera/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.204136 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/mysql-bootstrap/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.394346 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:47:42 crc kubenswrapper[4647]: E1128 16:47:42.394550 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.430079 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/mysql-bootstrap/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.685384 4647 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/galera/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.720757 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/mysql-bootstrap/0.log" Nov 28 16:47:42 crc kubenswrapper[4647]: I1128 16:47:42.862706 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_c765a2ba-ed3c-471b-8794-1623c126f0f2/openstackclient/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.110790 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-4psvg_fab755a9-f20f-4bc6-a7e2-353396a0ce74/ovn-controller/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.195926 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f/nova-metadata-metadata/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.256870 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-zql68_083c881c-8e40-4d03-b4f1-91af7bcd2cd1/openstack-network-exporter/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.418736 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server-init/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.772971 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server-init/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.884352 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server/0.log" Nov 28 16:47:43 crc kubenswrapper[4647]: I1128 16:47:43.889616 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovs-vswitchd/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.017449 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-rrkrm_7123f6db-1e1e-4bfb-97ca-f142f6cdb13a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.171436 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5edb07c-df1f-4434-8608-97841a748dd2/openstack-network-exporter/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.285880 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5edb07c-df1f-4434-8608-97841a748dd2/ovn-northd/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.460608 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_de3257de-cbde-4dca-89c7-21af1617cc66/openstack-network-exporter/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.605592 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_de3257de-cbde-4dca-89c7-21af1617cc66/ovsdbserver-nb/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.646766 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0/openstack-network-exporter/0.log" Nov 28 16:47:44 crc kubenswrapper[4647]: I1128 16:47:44.848754 4647 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0/ovsdbserver-sb/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.179800 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6565667588-kf4hg_cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407/placement-api/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.309916 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/setup-container/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.326482 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6565667588-kf4hg_cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407/placement-log/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.634823 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/setup-container/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.638983 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/setup-container/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.653562 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/rabbitmq/0.log" Nov 28 16:47:45 crc kubenswrapper[4647]: I1128 16:47:45.958376 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/setup-container/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.002309 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l_8cfdffa4-b728-4135-a613-7198ffda163d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.029819 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/rabbitmq/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.317814 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-hjmk9_54d11771-921e-4086-a17f-c853026c4a3e/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.415967 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp_ee739c78-f4a0-46eb-a0ca-a7bcab007c16/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.645347 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-tkvrh_965d18f9-ce83-44f5-8ec7-4b13eefa7e30/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:46 crc kubenswrapper[4647]: I1128 16:47:46.790266 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-k8pmm_e27e0d42-24a3-447f-aa49-fc305e1253c0/ssh-known-hosts-edpm-deployment/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.081710 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f577f58dc-7rp75_c5168b52-c295-45d6-aa36-932b5bb95a97/proxy-server/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.123943 4647 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_swift-proxy-6f577f58dc-7rp75_c5168b52-c295-45d6-aa36-932b5bb95a97/proxy-httpd/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.205993 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-dj6b9_b098c625-f531-4a3a-8532-fbfc7cd4f236/swift-ring-rebalance/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.383339 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_b7a9cfb5-c9cd-45ee-906e-70926173aa87/memcached/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.422495 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-auditor/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.446993 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-reaper/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.511371 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-replicator/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.604962 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-server/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.663964 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-auditor/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.689890 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-server/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.725551 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-replicator/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.768843 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-updater/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.870778 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-expirer/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.899386 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-auditor/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.938388 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-replicator/0.log" Nov 28 16:47:47 crc kubenswrapper[4647]: I1128 16:47:47.998525 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-server/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.021108 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-updater/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.103554 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/rsync/0.log" Nov 28 
16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.133378 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/swift-recon-cron/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.309345 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-wghtm_c1d8e071-fad7-4b8d-8637-e7be304c4c86/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.384301 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_5f2af68b-575d-469b-ab8d-7f16dfadc0d7/tempest-tests-tempest-tests-runner/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.456902 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99/test-operator-logs-container/0.log" Nov 28 16:47:48 crc kubenswrapper[4647]: I1128 16:47:48.553129 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv_123e6be6-0c87-4a2b-8f94-ae8207ccbaa5/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:47:56 crc kubenswrapper[4647]: I1128 16:47:56.396799 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:47:56 crc kubenswrapper[4647]: E1128 16:47:56.397346 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:48:07 crc kubenswrapper[4647]: I1128 16:48:07.394967 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:48:07 crc kubenswrapper[4647]: E1128 16:48:07.396147 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.488351 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.713048 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.774475 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.774777 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.896631 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.921886 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:48:13 crc kubenswrapper[4647]: I1128 16:48:13.951740 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/extract/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.627737 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-qbdbz_c27f5305-5c04-401d-b53e-ca2df0999cfd/kube-rbac-proxy/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.665247 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-s88h5_205cebfd-f183-486f-965f-ab494cae35dd/kube-rbac-proxy/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.708068 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-qbdbz_c27f5305-5c04-401d-b53e-ca2df0999cfd/manager/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.887300 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-s88h5_205cebfd-f183-486f-965f-ab494cae35dd/manager/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.968309 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-687dh_022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619/manager/0.log" Nov 28 16:48:14 crc kubenswrapper[4647]: I1128 16:48:14.980668 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-687dh_022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.168999 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-kdwxt_62caff02-44e5-4ae9-8879-e588e2ec2c26/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.273316 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-kdwxt_62caff02-44e5-4ae9-8879-e588e2ec2c26/manager/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.348904 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-cpbpp_2698b76b-928c-4d48-bf4e-e03df478867a/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.383165 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-cpbpp_2698b76b-928c-4d48-bf4e-e03df478867a/manager/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.549271 
4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-w2446_43fb88ed-c57b-412e-a210-49ce2e7f8848/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.598676 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-w2446_43fb88ed-c57b-412e-a210-49ce2e7f8848/manager/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.731868 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-v9m4v_93230429-04c5-45a9-81c5-dab4213025d4/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.827612 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-vl88c_6576f1dc-a847-446f-a228-d287036b2d56/kube-rbac-proxy/0.log" Nov 28 16:48:15 crc kubenswrapper[4647]: I1128 16:48:15.983709 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-v9m4v_93230429-04c5-45a9-81c5-dab4213025d4/manager/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.022983 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-vl88c_6576f1dc-a847-446f-a228-d287036b2d56/manager/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.129735 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-x2ggd_a4c112f9-f801-4aec-b715-72b336978342/kube-rbac-proxy/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.445227 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-x2ggd_a4c112f9-f801-4aec-b715-72b336978342/manager/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.450386 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-jnnvb_7ff4e4d2-ff33-484b-bc15-f0192f009688/kube-rbac-proxy/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.582048 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-jnnvb_7ff4e4d2-ff33-484b-bc15-f0192f009688/manager/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.728700 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-4tgc6_d32856fc-f28a-4e36-9e9b-0d09486b8a09/kube-rbac-proxy/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.765858 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-4tgc6_d32856fc-f28a-4e36-9e9b-0d09486b8a09/manager/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.826097 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-d9tcm_7760330a-6914-44a7-9fa5-aa6e6478506a/kube-rbac-proxy/0.log" Nov 28 16:48:16 crc kubenswrapper[4647]: I1128 16:48:16.975367 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-d9tcm_7760330a-6914-44a7-9fa5-aa6e6478506a/manager/0.log" Nov 28 16:48:17 crc kubenswrapper[4647]: 
I1128 16:48:17.074178 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-8szbq_f71ac6d8-b917-43a7-a35c-dce863f16280/kube-rbac-proxy/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.231350 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-5992m_6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6/kube-rbac-proxy/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.645384 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-zg7rq_873a1114-80f7-43f8-b6de-b69a7a152411/kube-rbac-proxy/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.645906 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-zg7rq_873a1114-80f7-43f8-b6de-b69a7a152411/manager/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.646507 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-5992m_6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6/manager/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.745895 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-8szbq_f71ac6d8-b917-43a7-a35c-dce863f16280/manager/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.883680 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-qvs9j_a57f869c-1d71-4341-a632-870e7b3dfede/kube-rbac-proxy/0.log"
Nov 28 16:48:17 crc kubenswrapper[4647]: I1128 16:48:17.949436 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-pt9fz_c8f72013-5d98-4478-bdd9-180abb82af2c/kube-rbac-proxy/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.252237 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-vtdpv_76ab5951-89d3-4ad1-8b6b-26982de63912/registry-server/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.254124 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-pt9fz_c8f72013-5d98-4478-bdd9-180abb82af2c/operator/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.505324 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-2v9x8_04af16a5-b153-433f-9c39-859c16167b0f/kube-rbac-proxy/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.621546 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-2v9x8_04af16a5-b153-433f-9c39-859c16167b0f/manager/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.649695 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-pzkc9_c2af1d24-9d02-4f14-95b7-3875382cb095/kube-rbac-proxy/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.784756 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-pzkc9_c2af1d24-9d02-4f14-95b7-3875382cb095/manager/0.log"
Nov 28 16:48:18 crc kubenswrapper[4647]: I1128 16:48:18.978912 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-kvck7_1a8f857d-6498-42ba-bbc5-2bb5b2896c6e/operator/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.056323 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zqz4k_4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f/kube-rbac-proxy/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.140558 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-qvs9j_a57f869c-1d71-4341-a632-870e7b3dfede/manager/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.231889 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zqz4k_4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f/manager/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.279422 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-rk4n7_5f72c046-071d-4e1c-8e12-6574bed76f27/kube-rbac-proxy/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.381441 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-rk4n7_5f72c046-071d-4e1c-8e12-6574bed76f27/manager/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.470811 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-9dc64_577fb6a4-bb39-4df2-b161-04b2ac2f44d4/manager/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.531865 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-9dc64_577fb6a4-bb39-4df2-b161-04b2ac2f44d4/kube-rbac-proxy/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.640297 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-f4jfb_93200d81-c9c3-4d5e-8406-112eef462119/kube-rbac-proxy/0.log"
Nov 28 16:48:19 crc kubenswrapper[4647]: I1128 16:48:19.692007 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-f4jfb_93200d81-c9c3-4d5e-8406-112eef462119/manager/0.log"
Nov 28 16:48:20 crc kubenswrapper[4647]: I1128 16:48:20.400835 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:48:20 crc kubenswrapper[4647]: E1128 16:48:20.401494 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:48:31 crc kubenswrapper[4647]: I1128 16:48:31.393949 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:48:31 crc kubenswrapper[4647]: E1128 16:48:31.394679 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:48:37 crc kubenswrapper[4647]: I1128 16:48:37.944127 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-5gqvz_4a2a1306-2eff-4fc2-ac8c-8bb461353abd/control-plane-machine-set-operator/0.log"
Nov 28 16:48:38 crc kubenswrapper[4647]: I1128 16:48:38.112920 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6t8hg_4408688c-7115-4338-9b06-e30b0ed30399/kube-rbac-proxy/0.log"
Nov 28 16:48:38 crc kubenswrapper[4647]: I1128 16:48:38.143486 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6t8hg_4408688c-7115-4338-9b06-e30b0ed30399/machine-api-operator/0.log"
Nov 28 16:48:43 crc kubenswrapper[4647]: I1128 16:48:43.394629 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:48:43 crc kubenswrapper[4647]: E1128 16:48:43.395355 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:48:51 crc kubenswrapper[4647]: I1128 16:48:51.283782 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-s96fs_7e8c6733-5ed1-46be-b533-e3b03a586fd5/cert-manager-controller/0.log"
Nov 28 16:48:51 crc kubenswrapper[4647]: I1128 16:48:51.495765 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-7qjfk_24afbb10-6dd7-4492-9340-287a2b45d450/cert-manager-webhook/0.log"
Nov 28 16:48:51 crc kubenswrapper[4647]: I1128 16:48:51.523733 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-6dlfq_15cf6e95-03c8-49db-9029-2fd5f51e14c1/cert-manager-cainjector/0.log"
Nov 28 16:48:55 crc kubenswrapper[4647]: I1128 16:48:55.394225 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:48:55 crc kubenswrapper[4647]: E1128 16:48:55.394823 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.066878 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-njhtz_247287a6-7c4f-4dae-ab2e-9e9d144fcdd4/nmstate-console-plugin/0.log"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.220094 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-2vmmg_e0476ba4-d83b-4a10-9898-fe3b6b05f76e/nmstate-handler/0.log"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.311404 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-wxjgv_6b13a440-59b0-440a-bcf5-164d5f29ceba/kube-rbac-proxy/0.log"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.347555 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-wxjgv_6b13a440-59b0-440a-bcf5-164d5f29ceba/nmstate-metrics/0.log"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.483474 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-2k6zn_22b9216f-dccd-4cc4-ac15-770a5edc610b/nmstate-operator/0.log"
Nov 28 16:49:04 crc kubenswrapper[4647]: I1128 16:49:04.590446 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-fs4lf_8110d8bf-8ce5-415f-857c-6a89c9729b32/nmstate-webhook/0.log"
Nov 28 16:49:07 crc kubenswrapper[4647]: I1128 16:49:07.394285 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:49:07 crc kubenswrapper[4647]: E1128 16:49:07.394634 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:49:18 crc kubenswrapper[4647]: I1128 16:49:18.394830 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:49:18 crc kubenswrapper[4647]: E1128 16:49:18.395577 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:49:20 crc kubenswrapper[4647]: I1128 16:49:20.530197 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xqlc5_c4214344-1c2e-48f0-a1cb-c0a0414c8e77/controller/0.log"
Nov 28 16:49:20 crc kubenswrapper[4647]: I1128 16:49:20.559980 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xqlc5_c4214344-1c2e-48f0-a1cb-c0a0414c8e77/kube-rbac-proxy/0.log"
Nov 28 16:49:20 crc kubenswrapper[4647]: I1128 16:49:20.767721 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log"
Nov 28 16:49:20 crc kubenswrapper[4647]: I1128 16:49:20.939674 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.012788 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.015344 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.036742 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.270085 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.334200 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.344745 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.358108 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.557177 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.651674 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/controller/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.672647 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.689879 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.906213 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/frr-metrics/0.log"
Nov 28 16:49:21 crc kubenswrapper[4647]: I1128 16:49:21.984472 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/kube-rbac-proxy/0.log"
Nov 28 16:49:22 crc kubenswrapper[4647]: I1128 16:49:22.055051 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/kube-rbac-proxy-frr/0.log"
Nov 28 16:49:22 crc kubenswrapper[4647]: I1128 16:49:22.214869 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/reloader/0.log"
Nov 28 16:49:22 crc kubenswrapper[4647]: I1128 16:49:22.409153 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-74jft_6850e82e-6e47-4f7e-a861-aa1e2f29b468/frr-k8s-webhook-server/0.log"
Nov 28 16:49:22 crc kubenswrapper[4647]: I1128 16:49:22.916000 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/frr/0.log"
Nov 28 16:49:22 crc kubenswrapper[4647]: I1128 16:49:22.920464 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6b8f4b57d8-9nmkq_660b76dc-783c-4df5-938b-7df9e2af467a/manager/0.log"
Nov 28 16:49:23 crc kubenswrapper[4647]: I1128 16:49:23.109019 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6846cd54fc-hhsz2_a68e1898-f4f8-468a-98fc-03e4f01397e4/webhook-server/0.log"
Nov 28 16:49:23 crc kubenswrapper[4647]: I1128 16:49:23.187371 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2csd4_8ee0a7ea-967a-457c-9d3b-1eb46c99b719/kube-rbac-proxy/0.log"
Nov 28 16:49:23 crc kubenswrapper[4647]: I1128 16:49:23.542321 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2csd4_8ee0a7ea-967a-457c-9d3b-1eb46c99b719/speaker/0.log"
Nov 28 16:49:30 crc kubenswrapper[4647]: I1128 16:49:30.398916 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:49:30 crc kubenswrapper[4647]: E1128 16:49:30.399802 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.185273 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.420948 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.583730 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.584107 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.585511 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.817559 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.866864 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/extract/0.log"
Nov 28 16:49:39 crc kubenswrapper[4647]: I1128 16:49:39.871764 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.035973 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.076939 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.088340 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.582247 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/extract/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.641147 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.653970 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log"
Nov 28 16:49:40 crc kubenswrapper[4647]: I1128 16:49:40.821854 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.043549 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.093753 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.097279 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.267961 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.307719 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log"
Nov 28 16:49:41 crc kubenswrapper[4647]: I1128 16:49:41.395209 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:49:41 crc kubenswrapper[4647]: E1128 16:49:41.395924 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.060697 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.095286 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/registry-server/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.282260 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.311377 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.322821 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.544401 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.613071 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.949723 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/1.log"
Nov 28 16:49:42 crc kubenswrapper[4647]: I1128 16:49:42.972262 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/2.log"
Nov 28 16:49:43 crc kubenswrapper[4647]: I1128 16:49:43.267226 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/registry-server/0.log"
Nov 28 16:49:43 crc kubenswrapper[4647]: I1128 16:49:43.486211 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log"
Nov 28 16:49:43 crc kubenswrapper[4647]: I1128 16:49:43.751075 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log"
Nov 28 16:49:43 crc kubenswrapper[4647]: I1128 16:49:43.783092 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log"
Nov 28 16:49:43 crc kubenswrapper[4647]: I1128 16:49:43.847003 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.016725 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.028170 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.074529 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/registry-server/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.146799 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.355672 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.426578 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.435260 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.640191 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log"
Nov 28 16:49:44 crc kubenswrapper[4647]: I1128 16:49:44.659714 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log"
Nov 28 16:49:45 crc kubenswrapper[4647]: I1128 16:49:45.169701 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/registry-server/0.log"
Nov 28 16:49:55 crc kubenswrapper[4647]: I1128 16:49:55.394948 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:49:55 crc kubenswrapper[4647]: E1128 16:49:55.395791 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:50:09 crc kubenswrapper[4647]: I1128 16:50:09.394794 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:50:09 crc kubenswrapper[4647]: E1128 16:50:09.395451 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:50:23 crc kubenswrapper[4647]: I1128 16:50:23.394313 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:50:23 crc kubenswrapper[4647]: E1128 16:50:23.395053 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:50:34 crc kubenswrapper[4647]: I1128 16:50:34.395506 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:50:34 crc kubenswrapper[4647]: E1128 16:50:34.396359 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:50:46 crc kubenswrapper[4647]: I1128 16:50:46.398875 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:50:46 crc kubenswrapper[4647]: E1128 16:50:46.399550 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:50:59 crc kubenswrapper[4647]: I1128 16:50:59.394732 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:50:59 crc kubenswrapper[4647]: E1128 16:50:59.398621 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:51:13 crc kubenswrapper[4647]: I1128 16:51:13.394504 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:51:13 crc kubenswrapper[4647]: E1128 16:51:13.396942 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.796324 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:21 crc kubenswrapper[4647]: E1128 16:51:21.797793 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="extract-utilities"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.797811 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="extract-utilities"
Nov 28 16:51:21 crc kubenswrapper[4647]: E1128 16:51:21.797821 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="extract-content"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.797829 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="extract-content"
Nov 28 16:51:21 crc kubenswrapper[4647]: E1128 16:51:21.797855 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="registry-server"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.797862 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="registry-server"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.798095 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="760cd115-15dd-4a88-b745-08a07b9fce9f" containerName="registry-server"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.799804 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.820101 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wb7m\" (UniqueName: \"kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.820191 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.820254 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.834926 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.921558 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.921677 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wb7m\" (UniqueName: \"kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.921735 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.922057 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.922092 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:21 crc kubenswrapper[4647]: I1128 16:51:21.946149 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wb7m\" (UniqueName: \"kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m\") pod \"redhat-marketplace-zxvgk\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") " pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:22 crc kubenswrapper[4647]: I1128 16:51:22.135635 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:23 crc kubenswrapper[4647]: I1128 16:51:23.346634 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:24 crc kubenswrapper[4647]: I1128 16:51:24.186934 4647 generic.go:334] "Generic (PLEG): container finished" podID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerID="3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c" exitCode=0
Nov 28 16:51:24 crc kubenswrapper[4647]: I1128 16:51:24.187483 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerDied","Data":"3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c"}
Nov 28 16:51:24 crc kubenswrapper[4647]: I1128 16:51:24.187785 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerStarted","Data":"999e3e2e887a69a5aff5e0029e01d45741abb008cd1fc1db8dc1a8f69ce77777"}
Nov 28 16:51:24 crc kubenswrapper[4647]: I1128 16:51:24.191530 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:51:26 crc kubenswrapper[4647]: I1128 16:51:26.221508 4647 generic.go:334] "Generic (PLEG): container finished" podID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerID="f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc" exitCode=0
Nov 28 16:51:26 crc kubenswrapper[4647]: I1128 16:51:26.221982 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerDied","Data":"f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc"}
Nov 28 16:51:27 crc kubenswrapper[4647]: I1128 16:51:27.234928 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerStarted","Data":"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"}
Nov 28 16:51:27 crc kubenswrapper[4647]: I1128 16:51:27.257875 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zxvgk" podStartSLOduration=3.77490426 podStartE2EDuration="6.257848458s" podCreationTimestamp="2025-11-28 16:51:21 +0000 UTC" firstStartedPulling="2025-11-28 16:51:24.191186552 +0000 UTC m=+5214.038792993" lastFinishedPulling="2025-11-28 16:51:26.67413077 +0000 UTC m=+5216.521737191" observedRunningTime="2025-11-28 16:51:27.25752412 +0000 UTC m=+5217.105130541" watchObservedRunningTime="2025-11-28 16:51:27.257848458 +0000 UTC m=+5217.105454879"
Nov 28 16:51:27 crc kubenswrapper[4647]: I1128 16:51:27.394876 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:51:27 crc kubenswrapper[4647]: E1128 16:51:27.395160 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:51:32 crc kubenswrapper[4647]: I1128 16:51:32.136279 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:32 crc kubenswrapper[4647]: I1128 16:51:32.137891 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:32 crc kubenswrapper[4647]: I1128 16:51:32.211865 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:32 crc kubenswrapper[4647]: I1128 16:51:32.362257 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:32 crc kubenswrapper[4647]: I1128 16:51:32.464753 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.318158 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zxvgk" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="registry-server" containerID="cri-o://00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b" gracePeriod=2
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.798112 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.910605 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content\") pod \"031f675b-45d8-4a9e-aab9-483cf7e894d2\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") "
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.910890 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities\") pod \"031f675b-45d8-4a9e-aab9-483cf7e894d2\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") "
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.910926 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wb7m\" (UniqueName: \"kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m\") pod \"031f675b-45d8-4a9e-aab9-483cf7e894d2\" (UID: \"031f675b-45d8-4a9e-aab9-483cf7e894d2\") "
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.912168 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities" (OuterVolumeSpecName: "utilities") pod "031f675b-45d8-4a9e-aab9-483cf7e894d2" (UID: "031f675b-45d8-4a9e-aab9-483cf7e894d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.920485 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m" (OuterVolumeSpecName: "kube-api-access-5wb7m") pod "031f675b-45d8-4a9e-aab9-483cf7e894d2" (UID: "031f675b-45d8-4a9e-aab9-483cf7e894d2"). InnerVolumeSpecName "kube-api-access-5wb7m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:51:34 crc kubenswrapper[4647]: I1128 16:51:34.934582 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "031f675b-45d8-4a9e-aab9-483cf7e894d2" (UID: "031f675b-45d8-4a9e-aab9-483cf7e894d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.016822 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.018464 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wb7m\" (UniqueName: \"kubernetes.io/projected/031f675b-45d8-4a9e-aab9-483cf7e894d2-kube-api-access-5wb7m\") on node \"crc\" DevicePath \"\""
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.018556 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031f675b-45d8-4a9e-aab9-483cf7e894d2-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.336494 4647 generic.go:334] "Generic (PLEG): container finished" podID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerID="00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b" exitCode=0
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.337637 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerDied","Data":"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"}
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.337832 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zxvgk" event={"ID":"031f675b-45d8-4a9e-aab9-483cf7e894d2","Type":"ContainerDied","Data":"999e3e2e887a69a5aff5e0029e01d45741abb008cd1fc1db8dc1a8f69ce77777"}
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.337975 4647 scope.go:117] "RemoveContainer" containerID="00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.338273 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zxvgk"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.378730 4647 scope.go:117] "RemoveContainer" containerID="f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.406541 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.418622 4647 scope.go:117] "RemoveContainer" containerID="3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.422387 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zxvgk"]
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.482651 4647 scope.go:117] "RemoveContainer" containerID="00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"
Nov 28 16:51:35 crc kubenswrapper[4647]: E1128 16:51:35.483872 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b\": container with ID starting with 00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b not found: ID does not exist" containerID="00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.484019 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b"} err="failed to get container status \"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b\": rpc error: code = NotFound desc = could not find container \"00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b\": container with ID starting with 00889d9c926fca571a73dc5bdd21e76fe7461c60c170b6e8a8ef95bf8f8c8e4b not found: ID does not exist"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.484149 4647 scope.go:117] "RemoveContainer" containerID="f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc"
Nov 28 16:51:35 crc kubenswrapper[4647]: E1128 16:51:35.484491 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc\": container with ID starting with f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc not found: ID does not exist" containerID="f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.484671 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc"} err="failed to get container status \"f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc\": rpc error: code = NotFound desc = could not find container \"f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc\": container with ID starting with f3ff472b4b7f5471bb6844ee18a983840afdff6a18e7bf8e568ec707788dfebc not found: ID does not exist"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.484770 4647 scope.go:117] "RemoveContainer" containerID="3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c"
Nov 28 16:51:35 crc kubenswrapper[4647]: E1128 16:51:35.485213 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c\": container with ID starting with 3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c not found: ID does not exist" containerID="3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c"
Nov 28 16:51:35 crc kubenswrapper[4647]: I1128 16:51:35.485375 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c"} err="failed to get container status \"3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c\": rpc error: code = NotFound desc = could not find container \"3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c\": container with ID starting with 3367b44a7b19c0b810ae374760c03afb1ff1d252b57e1d5c13e2bacbd1a3345c not found: ID does not exist"
Nov 28 16:51:36 crc kubenswrapper[4647]: I1128 16:51:36.416506 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" path="/var/lib/kubelet/pods/031f675b-45d8-4a9e-aab9-483cf7e894d2/volumes"
Nov 28 16:51:38 crc kubenswrapper[4647]: I1128 16:51:38.397100 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:51:38 crc kubenswrapper[4647]: E1128 16:51:38.397596 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5"
Nov 28 16:51:52 crc kubenswrapper[4647]: I1128 16:51:52.395402 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9"
Nov 28 16:51:53 crc kubenswrapper[4647]: I1128 16:51:53.532065 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b"}
Nov 28 16:52:14 crc kubenswrapper[4647]: I1128 16:52:14.772913 4647 generic.go:334] "Generic (PLEG): container finished" podID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerID="d3c995fb82186749407a94b168452e61f2012123d096570740e72a185c2b60b5" exitCode=0
Nov 28 16:52:14 crc kubenswrapper[4647]: I1128 16:52:14.773449 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" event={"ID":"b63976eb-8fb0-4a44-92ab-f4dda621eea1","Type":"ContainerDied","Data":"d3c995fb82186749407a94b168452e61f2012123d096570740e72a185c2b60b5"}
Nov 28 16:52:14 crc kubenswrapper[4647]: I1128 16:52:14.774190 4647 scope.go:117] "RemoveContainer" containerID="d3c995fb82186749407a94b168452e61f2012123d096570740e72a185c2b60b5"
Nov 28 16:52:15 crc kubenswrapper[4647]: I1128 16:52:15.522680 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xwzm8_must-gather-hz5rs_b63976eb-8fb0-4a44-92ab-f4dda621eea1/gather/0.log"
Nov 28 16:52:24 crc kubenswrapper[4647]: I1128 16:52:24.150892 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-xwzm8/must-gather-hz5rs"]
Nov 28 16:52:24 crc kubenswrapper[4647]: I1128 16:52:24.152985 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-xwzm8/must-gather-hz5rs" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="copy" containerID="cri-o://3f0867b0a0cb2359e119f47b275738f624943e43b573db2b3dbea1515951b3fe" gracePeriod=2
Nov 28 16:52:24 crc kubenswrapper[4647]: I1128 16:52:24.160609 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-xwzm8/must-gather-hz5rs"]
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.369082 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xwzm8_must-gather-hz5rs_b63976eb-8fb0-4a44-92ab-f4dda621eea1/copy/0.log"
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.370590 4647 generic.go:334] "Generic (PLEG): container finished" podID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerID="3f0867b0a0cb2359e119f47b275738f624943e43b573db2b3dbea1515951b3fe" exitCode=143
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.482367 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xwzm8_must-gather-hz5rs_b63976eb-8fb0-4a44-92ab-f4dda621eea1/copy/0.log"
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.482688 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/must-gather-hz5rs"
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.589652 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6c5h\" (UniqueName: \"kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h\") pod \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") "
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.589721 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output\") pod \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\" (UID: \"b63976eb-8fb0-4a44-92ab-f4dda621eea1\") "
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.614712 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h" (OuterVolumeSpecName: "kube-api-access-h6c5h") pod "b63976eb-8fb0-4a44-92ab-f4dda621eea1" (UID: "b63976eb-8fb0-4a44-92ab-f4dda621eea1"). InnerVolumeSpecName "kube-api-access-h6c5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.694671 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6c5h\" (UniqueName: \"kubernetes.io/projected/b63976eb-8fb0-4a44-92ab-f4dda621eea1-kube-api-access-h6c5h\") on node \"crc\" DevicePath \"\""
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.788661 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b63976eb-8fb0-4a44-92ab-f4dda621eea1" (UID: "b63976eb-8fb0-4a44-92ab-f4dda621eea1"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:52:25 crc kubenswrapper[4647]: I1128 16:52:25.795904 4647 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b63976eb-8fb0-4a44-92ab-f4dda621eea1-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 28 16:52:26 crc kubenswrapper[4647]: I1128 16:52:26.381917 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-xwzm8_must-gather-hz5rs_b63976eb-8fb0-4a44-92ab-f4dda621eea1/copy/0.log"
Nov 28 16:52:26 crc kubenswrapper[4647]: I1128 16:52:26.382852 4647 scope.go:117] "RemoveContainer" containerID="3f0867b0a0cb2359e119f47b275738f624943e43b573db2b3dbea1515951b3fe"
Nov 28 16:52:26 crc kubenswrapper[4647]: I1128 16:52:26.382880 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-xwzm8/must-gather-hz5rs"
Nov 28 16:52:26 crc kubenswrapper[4647]: I1128 16:52:26.408029 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" path="/var/lib/kubelet/pods/b63976eb-8fb0-4a44-92ab-f4dda621eea1/volumes"
Nov 28 16:52:26 crc kubenswrapper[4647]: I1128 16:52:26.414672 4647 scope.go:117] "RemoveContainer" containerID="d3c995fb82186749407a94b168452e61f2012123d096570740e72a185c2b60b5"
Nov 28 16:53:20 crc kubenswrapper[4647]: I1128 16:53:20.145362 4647 scope.go:117] "RemoveContainer" containerID="8cbd86030c206ddc56e6ee834bd12bcea2a48b0bb2b3d6a2d39a1eeb609936b3"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.187980 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xg2d8"]
Nov 28 16:54:08 crc kubenswrapper[4647]: E1128 16:54:08.188901 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="gather"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.188914 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="gather"
Nov 28 16:54:08 crc kubenswrapper[4647]: E1128 16:54:08.188925 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="registry-server"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.188932 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="registry-server"
Nov 28 16:54:08 crc kubenswrapper[4647]: E1128 16:54:08.188947 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="extract-utilities"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.188955 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="extract-utilities"
Nov 28 16:54:08 crc kubenswrapper[4647]: E1128 16:54:08.188969 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="copy"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.188975 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="copy"
Nov 28 16:54:08 crc kubenswrapper[4647]: E1128 16:54:08.188985 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="extract-content"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.188991 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="extract-content"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.189166 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="031f675b-45d8-4a9e-aab9-483cf7e894d2" containerName="registry-server"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.189180 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="copy"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.189201 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="b63976eb-8fb0-4a44-92ab-f4dda621eea1" containerName="gather"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.190524 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.220478 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xg2d8"]
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.265107 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.265292 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr65w\" (UniqueName: \"kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.265523 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.367329 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr65w\" (UniqueName: \"kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.367442 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.367476 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.368130 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.368680 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.403492 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr65w\" (UniqueName: \"kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w\") pod \"community-operators-xg2d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:08 crc kubenswrapper[4647]: I1128 16:54:08.514895 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xg2d8"
Nov 28 16:54:09 crc kubenswrapper[4647]: I1128 16:54:09.030504 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xg2d8"]
Nov 28 16:54:09 crc kubenswrapper[4647]: I1128 16:54:09.454443 4647 generic.go:334] "Generic (PLEG): container finished" podID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerID="85e1d88a43dc35e8080a2212b809ba3e46e8ab01d6d7b524cc37f01708126700" exitCode=0
Nov 28 16:54:09 crc kubenswrapper[4647]: I1128 16:54:09.454506 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerDied","Data":"85e1d88a43dc35e8080a2212b809ba3e46e8ab01d6d7b524cc37f01708126700"}
Nov 28 16:54:09 crc kubenswrapper[4647]: I1128 16:54:09.454777 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerStarted","Data":"183fea85c9f90cc0472eff4069ea9f8ac63c54871fedab240c14cbc4a8612a68"}
Nov 28 16:54:11 crc kubenswrapper[4647]: I1128 16:54:11.481999 4647 generic.go:334] "Generic (PLEG): container finished" podID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerID="80b25b44307a1e1d1bec3641d2aca649f81d704e9f4d55c06eaeff5c0be82c3a" exitCode=0
Nov 28 16:54:11 crc kubenswrapper[4647]: I1128 16:54:11.482095 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerDied","Data":"80b25b44307a1e1d1bec3641d2aca649f81d704e9f4d55c06eaeff5c0be82c3a"}
Nov 28 16:54:12 crc kubenswrapper[4647]: I1128 16:54:12.494365 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerStarted","Data":"65fe59caa46602b8417c214b0293302a7297165f4aa6e6241499809395859a76"}
Nov 28 16:54:12 crc kubenswrapper[4647]: I1128 16:54:12.517944 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xg2d8" podStartSLOduration=1.842736703 podStartE2EDuration="4.517913358s"
podCreationTimestamp="2025-11-28 16:54:08 +0000 UTC" firstStartedPulling="2025-11-28 16:54:09.456637816 +0000 UTC m=+5379.304244237" lastFinishedPulling="2025-11-28 16:54:12.131814461 +0000 UTC m=+5381.979420892" observedRunningTime="2025-11-28 16:54:12.510114279 +0000 UTC m=+5382.357720710" watchObservedRunningTime="2025-11-28 16:54:12.517913358 +0000 UTC m=+5382.365519809" Nov 28 16:54:17 crc kubenswrapper[4647]: I1128 16:54:17.022750 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:54:17 crc kubenswrapper[4647]: I1128 16:54:17.023445 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:54:18 crc kubenswrapper[4647]: I1128 16:54:18.516354 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:18 crc kubenswrapper[4647]: I1128 16:54:18.518341 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:18 crc kubenswrapper[4647]: I1128 16:54:18.584818 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:19 crc kubenswrapper[4647]: I1128 16:54:19.614433 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:19 crc kubenswrapper[4647]: I1128 16:54:19.671349 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xg2d8"] Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.582350 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xg2d8" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="registry-server" containerID="cri-o://65fe59caa46602b8417c214b0293302a7297165f4aa6e6241499809395859a76" gracePeriod=2 Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.674362 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.677737 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.687076 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.771593 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.772158 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.772261 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hgb6\" (UniqueName: \"kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.874503 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hgb6\" (UniqueName: \"kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.874613 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.874686 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.875200 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.875373 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:21 crc kubenswrapper[4647]: I1128 16:54:21.899249 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6hgb6\" (UniqueName: \"kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6\") pod \"redhat-operators-cs46f\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.052668 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.552309 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:22 crc kubenswrapper[4647]: W1128 16:54:22.567959 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bc6b714_4507_4297_908f_301ef5c6987a.slice/crio-a96b1c3a72121545eb42b5a3ff22b0bc3ba979f44ac88acec01dcad4bf3f9de5 WatchSource:0}: Error finding container a96b1c3a72121545eb42b5a3ff22b0bc3ba979f44ac88acec01dcad4bf3f9de5: Status 404 returned error can't find the container with id a96b1c3a72121545eb42b5a3ff22b0bc3ba979f44ac88acec01dcad4bf3f9de5 Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.613321 4647 generic.go:334] "Generic (PLEG): container finished" podID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerID="65fe59caa46602b8417c214b0293302a7297165f4aa6e6241499809395859a76" exitCode=0 Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.613425 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerDied","Data":"65fe59caa46602b8417c214b0293302a7297165f4aa6e6241499809395859a76"} Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.613455 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xg2d8" event={"ID":"7c57cfd8-2557-4fe0-b90b-e16080e484d8","Type":"ContainerDied","Data":"183fea85c9f90cc0472eff4069ea9f8ac63c54871fedab240c14cbc4a8612a68"} Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.613468 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="183fea85c9f90cc0472eff4069ea9f8ac63c54871fedab240c14cbc4a8612a68" Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.614586 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerStarted","Data":"a96b1c3a72121545eb42b5a3ff22b0bc3ba979f44ac88acec01dcad4bf3f9de5"} Nov 28 16:54:22 crc kubenswrapper[4647]: I1128 16:54:22.832777 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.018180 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities\") pod \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.018479 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr65w\" (UniqueName: \"kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w\") pod \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.018530 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content\") pod \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\" (UID: \"7c57cfd8-2557-4fe0-b90b-e16080e484d8\") " Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.019454 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities" (OuterVolumeSpecName: "utilities") pod "7c57cfd8-2557-4fe0-b90b-e16080e484d8" (UID: "7c57cfd8-2557-4fe0-b90b-e16080e484d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.027755 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w" (OuterVolumeSpecName: "kube-api-access-xr65w") pod "7c57cfd8-2557-4fe0-b90b-e16080e484d8" (UID: "7c57cfd8-2557-4fe0-b90b-e16080e484d8"). InnerVolumeSpecName "kube-api-access-xr65w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.081263 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7c57cfd8-2557-4fe0-b90b-e16080e484d8" (UID: "7c57cfd8-2557-4fe0-b90b-e16080e484d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.121433 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.121474 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr65w\" (UniqueName: \"kubernetes.io/projected/7c57cfd8-2557-4fe0-b90b-e16080e484d8-kube-api-access-xr65w\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.121486 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c57cfd8-2557-4fe0-b90b-e16080e484d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.626783 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bc6b714-4507-4297-908f-301ef5c6987a" containerID="c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a" exitCode=0 Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.626874 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xg2d8" Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.631832 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerDied","Data":"c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a"} Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.700781 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xg2d8"] Nov 28 16:54:23 crc kubenswrapper[4647]: I1128 16:54:23.722931 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xg2d8"] Nov 28 16:54:24 crc kubenswrapper[4647]: I1128 16:54:24.410829 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" path="/var/lib/kubelet/pods/7c57cfd8-2557-4fe0-b90b-e16080e484d8/volumes" Nov 28 16:54:24 crc kubenswrapper[4647]: I1128 16:54:24.639609 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerStarted","Data":"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7"} Nov 28 16:54:28 crc kubenswrapper[4647]: I1128 16:54:28.680654 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bc6b714-4507-4297-908f-301ef5c6987a" containerID="0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7" exitCode=0 Nov 28 16:54:28 crc kubenswrapper[4647]: I1128 16:54:28.680759 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerDied","Data":"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7"} Nov 28 16:54:30 crc kubenswrapper[4647]: I1128 16:54:30.703642 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerStarted","Data":"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265"} Nov 28 16:54:30 crc kubenswrapper[4647]: I1128 16:54:30.735951 4647 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cs46f" podStartSLOduration=3.886329061 podStartE2EDuration="9.735929777s" podCreationTimestamp="2025-11-28 16:54:21 +0000 UTC" firstStartedPulling="2025-11-28 16:54:23.638207813 +0000 UTC m=+5393.485814234" lastFinishedPulling="2025-11-28 16:54:29.487808509 +0000 UTC m=+5399.335414950" observedRunningTime="2025-11-28 16:54:30.72934657 +0000 UTC m=+5400.576952991" watchObservedRunningTime="2025-11-28 16:54:30.735929777 +0000 UTC m=+5400.583536198" Nov 28 16:54:32 crc kubenswrapper[4647]: I1128 16:54:32.053851 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:32 crc kubenswrapper[4647]: I1128 16:54:32.053891 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:33 crc kubenswrapper[4647]: I1128 16:54:33.109405 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cs46f" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="registry-server" probeResult="failure" output=< Nov 28 16:54:33 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 16:54:33 crc kubenswrapper[4647]: > Nov 28 16:54:42 crc kubenswrapper[4647]: I1128 16:54:42.118737 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:42 crc kubenswrapper[4647]: I1128 16:54:42.175211 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:42 crc kubenswrapper[4647]: I1128 16:54:42.361558 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:43 crc kubenswrapper[4647]: I1128 16:54:43.820644 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cs46f" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="registry-server" containerID="cri-o://aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265" gracePeriod=2 Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.507812 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.628456 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities\") pod \"8bc6b714-4507-4297-908f-301ef5c6987a\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.628491 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content\") pod \"8bc6b714-4507-4297-908f-301ef5c6987a\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.628758 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hgb6\" (UniqueName: \"kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6\") pod \"8bc6b714-4507-4297-908f-301ef5c6987a\" (UID: \"8bc6b714-4507-4297-908f-301ef5c6987a\") " Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.629770 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities" (OuterVolumeSpecName: "utilities") pod "8bc6b714-4507-4297-908f-301ef5c6987a" (UID: "8bc6b714-4507-4297-908f-301ef5c6987a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.642676 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6" (OuterVolumeSpecName: "kube-api-access-6hgb6") pod "8bc6b714-4507-4297-908f-301ef5c6987a" (UID: "8bc6b714-4507-4297-908f-301ef5c6987a"). InnerVolumeSpecName "kube-api-access-6hgb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.730822 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hgb6\" (UniqueName: \"kubernetes.io/projected/8bc6b714-4507-4297-908f-301ef5c6987a-kube-api-access-6hgb6\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.730848 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.752582 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8bc6b714-4507-4297-908f-301ef5c6987a" (UID: "8bc6b714-4507-4297-908f-301ef5c6987a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832103 4647 generic.go:334] "Generic (PLEG): container finished" podID="8bc6b714-4507-4297-908f-301ef5c6987a" containerID="aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265" exitCode=0 Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832156 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerDied","Data":"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265"} Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832183 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cs46f" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832195 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cs46f" event={"ID":"8bc6b714-4507-4297-908f-301ef5c6987a","Type":"ContainerDied","Data":"a96b1c3a72121545eb42b5a3ff22b0bc3ba979f44ac88acec01dcad4bf3f9de5"} Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832217 4647 scope.go:117] "RemoveContainer" containerID="aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.832248 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bc6b714-4507-4297-908f-301ef5c6987a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.859890 4647 scope.go:117] "RemoveContainer" containerID="0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.883475 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.890306 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cs46f"] Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.893980 4647 scope.go:117] "RemoveContainer" containerID="c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.935586 4647 scope.go:117] "RemoveContainer" containerID="aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265" Nov 28 16:54:44 crc kubenswrapper[4647]: E1128 16:54:44.936553 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265\": container with ID starting with aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265 not found: ID does not exist" containerID="aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.936592 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265"} err="failed to get container status \"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265\": rpc error: code = NotFound desc = could not find container \"aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265\": container with ID starting with aff2528c18cee1dcfda28c6da9c26fb82c375a3fcd226cea09b5e0209e224265 not found: ID does not exist" Nov 28 16:54:44 crc 
kubenswrapper[4647]: I1128 16:54:44.936617 4647 scope.go:117] "RemoveContainer" containerID="0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7" Nov 28 16:54:44 crc kubenswrapper[4647]: E1128 16:54:44.936950 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7\": container with ID starting with 0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7 not found: ID does not exist" containerID="0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.936976 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7"} err="failed to get container status \"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7\": rpc error: code = NotFound desc = could not find container \"0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7\": container with ID starting with 0993475910cf5cb8489e44639d779ce75695b6a9c3a5275c1b2987106cd39ca7 not found: ID does not exist" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.936996 4647 scope.go:117] "RemoveContainer" containerID="c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a" Nov 28 16:54:44 crc kubenswrapper[4647]: E1128 16:54:44.937300 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a\": container with ID starting with c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a not found: ID does not exist" containerID="c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a" Nov 28 16:54:44 crc kubenswrapper[4647]: I1128 16:54:44.937325 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a"} err="failed to get container status \"c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a\": rpc error: code = NotFound desc = could not find container \"c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a\": container with ID starting with c11980b61f61f6c8cfdcbf53d808713eefe0d143e5b0de66fd9fb644508ffa2a not found: ID does not exist" Nov 28 16:54:46 crc kubenswrapper[4647]: I1128 16:54:46.409682 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" path="/var/lib/kubelet/pods/8bc6b714-4507-4297-908f-301ef5c6987a/volumes" Nov 28 16:54:47 crc kubenswrapper[4647]: I1128 16:54:47.022465 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:54:47 crc kubenswrapper[4647]: I1128 16:54:47.022552 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.249556 4647 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-must-gather-jqx7c/must-gather-h2fw7"] Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.251563 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="extract-utilities" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.251661 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="extract-utilities" Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.251729 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.251786 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.251867 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="extract-utilities" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.251926 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="extract-utilities" Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.252011 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="extract-content" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.252075 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="extract-content" Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.252161 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="extract-content" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.252232 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="extract-content" Nov 28 16:55:10 crc kubenswrapper[4647]: E1128 16:55:10.252327 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.252385 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.252680 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c57cfd8-2557-4fe0-b90b-e16080e484d8" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.252764 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc6b714-4507-4297-908f-301ef5c6987a" containerName="registry-server" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.253864 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.261050 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-jqx7c"/"default-dockercfg-q4v7g" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.261225 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jqx7c"/"openshift-service-ca.crt" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.270999 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jqx7c"/"kube-root-ca.crt" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.301207 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jqx7c/must-gather-h2fw7"] Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.372997 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.373358 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h88cs\" (UniqueName: \"kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.475282 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h88cs\" (UniqueName: \"kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.475389 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.475845 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.513427 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h88cs\" (UniqueName: \"kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs\") pod \"must-gather-h2fw7\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:10 crc kubenswrapper[4647]: I1128 16:55:10.575375 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 16:55:11 crc kubenswrapper[4647]: I1128 16:55:11.094467 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jqx7c/must-gather-h2fw7"] Nov 28 16:55:11 crc kubenswrapper[4647]: I1128 16:55:11.145141 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" event={"ID":"3974b935-7762-4f74-a9ed-bda6ae385160","Type":"ContainerStarted","Data":"dad6c1c0059a43a1b5766f063dcaa05598edb13016865a3703f636b6d85dd143"} Nov 28 16:55:12 crc kubenswrapper[4647]: I1128 16:55:12.190818 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" event={"ID":"3974b935-7762-4f74-a9ed-bda6ae385160","Type":"ContainerStarted","Data":"211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77"} Nov 28 16:55:12 crc kubenswrapper[4647]: I1128 16:55:12.191198 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" event={"ID":"3974b935-7762-4f74-a9ed-bda6ae385160","Type":"ContainerStarted","Data":"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471"} Nov 28 16:55:12 crc kubenswrapper[4647]: I1128 16:55:12.208938 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" podStartSLOduration=2.208920864 podStartE2EDuration="2.208920864s" podCreationTimestamp="2025-11-28 16:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:55:12.202038579 +0000 UTC m=+5442.049645000" watchObservedRunningTime="2025-11-28 16:55:12.208920864 +0000 UTC m=+5442.056527285" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.541450 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-6x6s6"] Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.543112 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.681600 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqcz9\" (UniqueName: \"kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.682022 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.784227 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqcz9\" (UniqueName: \"kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.784346 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.784785 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.811184 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqcz9\" (UniqueName: \"kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9\") pod \"crc-debug-6x6s6\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: I1128 16:55:15.861848 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:55:15 crc kubenswrapper[4647]: W1128 16:55:15.898100 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39972479_1286_4dfb_84c2_21df11b21c3d.slice/crio-836fc3a8706d6a5855970cc8de2c9409e2e69734c08ed041e65efca63a15b58e WatchSource:0}: Error finding container 836fc3a8706d6a5855970cc8de2c9409e2e69734c08ed041e65efca63a15b58e: Status 404 returned error can't find the container with id 836fc3a8706d6a5855970cc8de2c9409e2e69734c08ed041e65efca63a15b58e Nov 28 16:55:16 crc kubenswrapper[4647]: I1128 16:55:16.239537 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" event={"ID":"39972479-1286-4dfb-84c2-21df11b21c3d","Type":"ContainerStarted","Data":"5f6c74f2a576d6183a39bd4d48e00e95610150f65ee55e7d6f054d74a2a7144c"} Nov 28 16:55:16 crc kubenswrapper[4647]: I1128 16:55:16.239919 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" event={"ID":"39972479-1286-4dfb-84c2-21df11b21c3d","Type":"ContainerStarted","Data":"836fc3a8706d6a5855970cc8de2c9409e2e69734c08ed041e65efca63a15b58e"} Nov 28 16:55:16 crc kubenswrapper[4647]: I1128 16:55:16.255507 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" podStartSLOduration=1.255489536 podStartE2EDuration="1.255489536s" podCreationTimestamp="2025-11-28 16:55:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:55:16.255030444 +0000 UTC m=+5446.102636875" watchObservedRunningTime="2025-11-28 16:55:16.255489536 +0000 UTC m=+5446.103095947" Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.023107 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.023709 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.023844 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.024446 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.024509 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" 
containerID="cri-o://b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b" gracePeriod=600 Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.255941 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b" exitCode=0 Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.256583 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b"} Nov 28 16:55:17 crc kubenswrapper[4647]: I1128 16:55:17.256625 4647 scope.go:117] "RemoveContainer" containerID="cf90f96f3d31a9ec4976c5b238d9007232250ab23f6c36660ad635b0920b2ff9" Nov 28 16:55:18 crc kubenswrapper[4647]: I1128 16:55:18.266258 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea"} Nov 28 16:56:01 crc kubenswrapper[4647]: I1128 16:56:01.658905 4647 generic.go:334] "Generic (PLEG): container finished" podID="39972479-1286-4dfb-84c2-21df11b21c3d" containerID="5f6c74f2a576d6183a39bd4d48e00e95610150f65ee55e7d6f054d74a2a7144c" exitCode=0 Nov 28 16:56:01 crc kubenswrapper[4647]: I1128 16:56:01.659006 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" event={"ID":"39972479-1286-4dfb-84c2-21df11b21c3d","Type":"ContainerDied","Data":"5f6c74f2a576d6183a39bd4d48e00e95610150f65ee55e7d6f054d74a2a7144c"} Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.789451 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.821149 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-6x6s6"] Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.828905 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-6x6s6"] Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.949460 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host\") pod \"39972479-1286-4dfb-84c2-21df11b21c3d\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.949559 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqcz9\" (UniqueName: \"kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9\") pod \"39972479-1286-4dfb-84c2-21df11b21c3d\" (UID: \"39972479-1286-4dfb-84c2-21df11b21c3d\") " Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.949841 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host" (OuterVolumeSpecName: "host") pod "39972479-1286-4dfb-84c2-21df11b21c3d" (UID: "39972479-1286-4dfb-84c2-21df11b21c3d"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.951293 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/39972479-1286-4dfb-84c2-21df11b21c3d-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:02 crc kubenswrapper[4647]: I1128 16:56:02.959309 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9" (OuterVolumeSpecName: "kube-api-access-gqcz9") pod "39972479-1286-4dfb-84c2-21df11b21c3d" (UID: "39972479-1286-4dfb-84c2-21df11b21c3d"). InnerVolumeSpecName "kube-api-access-gqcz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:03 crc kubenswrapper[4647]: I1128 16:56:03.053270 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqcz9\" (UniqueName: \"kubernetes.io/projected/39972479-1286-4dfb-84c2-21df11b21c3d-kube-api-access-gqcz9\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:03 crc kubenswrapper[4647]: I1128 16:56:03.682762 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="836fc3a8706d6a5855970cc8de2c9409e2e69734c08ed041e65efca63a15b58e" Nov 28 16:56:03 crc kubenswrapper[4647]: I1128 16:56:03.683151 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-6x6s6" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.041895 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-p48xg"] Nov 28 16:56:04 crc kubenswrapper[4647]: E1128 16:56:04.042436 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39972479-1286-4dfb-84c2-21df11b21c3d" containerName="container-00" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.042456 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="39972479-1286-4dfb-84c2-21df11b21c3d" containerName="container-00" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.042655 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="39972479-1286-4dfb-84c2-21df11b21c3d" containerName="container-00" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.043538 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.076785 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.076969 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddrkv\" (UniqueName: \"kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.179958 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.180043 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddrkv\" (UniqueName: \"kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.180127 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.201261 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddrkv\" (UniqueName: \"kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv\") pod \"crc-debug-p48xg\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.362778 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.412006 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39972479-1286-4dfb-84c2-21df11b21c3d" path="/var/lib/kubelet/pods/39972479-1286-4dfb-84c2-21df11b21c3d/volumes" Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.690812 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" event={"ID":"f4e7f700-b86d-4e80-85f7-7367c54c3d68","Type":"ContainerStarted","Data":"4856d40c6bc43034ee356d069c548b3dee7c528d4935d4e8cddbf67092e2138b"} Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.690875 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" event={"ID":"f4e7f700-b86d-4e80-85f7-7367c54c3d68","Type":"ContainerStarted","Data":"d0b6832cf1b15afd1a882334b8ec5281afa45d2e555326a7db9015476222472b"} Nov 28 16:56:04 crc kubenswrapper[4647]: I1128 16:56:04.705084 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" podStartSLOduration=0.705066496 podStartE2EDuration="705.066496ms" podCreationTimestamp="2025-11-28 16:56:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:04.703621387 +0000 UTC m=+5494.551227818" watchObservedRunningTime="2025-11-28 16:56:04.705066496 +0000 UTC m=+5494.552672907" Nov 28 16:56:05 crc kubenswrapper[4647]: I1128 16:56:05.700323 4647 generic.go:334] "Generic (PLEG): container finished" podID="f4e7f700-b86d-4e80-85f7-7367c54c3d68" containerID="4856d40c6bc43034ee356d069c548b3dee7c528d4935d4e8cddbf67092e2138b" exitCode=0 Nov 28 16:56:05 crc kubenswrapper[4647]: I1128 16:56:05.700383 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" event={"ID":"f4e7f700-b86d-4e80-85f7-7367c54c3d68","Type":"ContainerDied","Data":"4856d40c6bc43034ee356d069c548b3dee7c528d4935d4e8cddbf67092e2138b"} Nov 28 16:56:06 crc kubenswrapper[4647]: I1128 16:56:06.833690 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:06 crc kubenswrapper[4647]: I1128 16:56:06.932147 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddrkv\" (UniqueName: \"kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv\") pod \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " Nov 28 16:56:06 crc kubenswrapper[4647]: I1128 16:56:06.932296 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host\") pod \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\" (UID: \"f4e7f700-b86d-4e80-85f7-7367c54c3d68\") " Nov 28 16:56:06 crc kubenswrapper[4647]: I1128 16:56:06.933127 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host" (OuterVolumeSpecName: "host") pod "f4e7f700-b86d-4e80-85f7-7367c54c3d68" (UID: "f4e7f700-b86d-4e80-85f7-7367c54c3d68"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:56:06 crc kubenswrapper[4647]: I1128 16:56:06.943642 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv" (OuterVolumeSpecName: "kube-api-access-ddrkv") pod "f4e7f700-b86d-4e80-85f7-7367c54c3d68" (UID: "f4e7f700-b86d-4e80-85f7-7367c54c3d68"). InnerVolumeSpecName "kube-api-access-ddrkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.002243 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-p48xg"] Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.015391 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-p48xg"] Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.034672 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddrkv\" (UniqueName: \"kubernetes.io/projected/f4e7f700-b86d-4e80-85f7-7367c54c3d68-kube-api-access-ddrkv\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.034727 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4e7f700-b86d-4e80-85f7-7367c54c3d68-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.718859 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0b6832cf1b15afd1a882334b8ec5281afa45d2e555326a7db9015476222472b" Nov 28 16:56:07 crc kubenswrapper[4647]: I1128 16:56:07.718914 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-p48xg" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.322081 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-njxx7"] Nov 28 16:56:08 crc kubenswrapper[4647]: E1128 16:56:08.322581 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e7f700-b86d-4e80-85f7-7367c54c3d68" containerName="container-00" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.322595 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e7f700-b86d-4e80-85f7-7367c54c3d68" containerName="container-00" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.324894 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e7f700-b86d-4e80-85f7-7367c54c3d68" containerName="container-00" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.328127 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.420136 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e7f700-b86d-4e80-85f7-7367c54c3d68" path="/var/lib/kubelet/pods/f4e7f700-b86d-4e80-85f7-7367c54c3d68/volumes" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.473663 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.474038 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqnf6\" (UniqueName: \"kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.576133 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.576538 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.576841 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqnf6\" (UniqueName: \"kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.596540 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqnf6\" (UniqueName: \"kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6\") pod \"crc-debug-njxx7\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:08 crc kubenswrapper[4647]: I1128 16:56:08.692477 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:09 crc kubenswrapper[4647]: I1128 16:56:09.741174 4647 generic.go:334] "Generic (PLEG): container finished" podID="772dfaf9-c4aa-4264-a15d-fbf19380f275" containerID="b4f8b90f07d255767066b59f53509d216f91c0877ba902606434bace369b49e6" exitCode=0 Nov 28 16:56:09 crc kubenswrapper[4647]: I1128 16:56:09.741260 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" event={"ID":"772dfaf9-c4aa-4264-a15d-fbf19380f275","Type":"ContainerDied","Data":"b4f8b90f07d255767066b59f53509d216f91c0877ba902606434bace369b49e6"} Nov 28 16:56:09 crc kubenswrapper[4647]: I1128 16:56:09.741467 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" event={"ID":"772dfaf9-c4aa-4264-a15d-fbf19380f275","Type":"ContainerStarted","Data":"2756cbca5917607387666e6dfcab359b313b14b2dda5e0fe8d071a77e1707ff1"} Nov 28 16:56:09 crc kubenswrapper[4647]: I1128 16:56:09.777669 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-njxx7"] Nov 28 16:56:09 crc kubenswrapper[4647]: I1128 16:56:09.792751 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jqx7c/crc-debug-njxx7"] Nov 28 16:56:10 crc kubenswrapper[4647]: I1128 16:56:10.844164 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.040774 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqnf6\" (UniqueName: \"kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6\") pod \"772dfaf9-c4aa-4264-a15d-fbf19380f275\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.040896 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host\") pod \"772dfaf9-c4aa-4264-a15d-fbf19380f275\" (UID: \"772dfaf9-c4aa-4264-a15d-fbf19380f275\") " Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.040958 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host" (OuterVolumeSpecName: "host") pod "772dfaf9-c4aa-4264-a15d-fbf19380f275" (UID: "772dfaf9-c4aa-4264-a15d-fbf19380f275"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.041508 4647 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/772dfaf9-c4aa-4264-a15d-fbf19380f275-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.052702 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6" (OuterVolumeSpecName: "kube-api-access-pqnf6") pod "772dfaf9-c4aa-4264-a15d-fbf19380f275" (UID: "772dfaf9-c4aa-4264-a15d-fbf19380f275"). InnerVolumeSpecName "kube-api-access-pqnf6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.143944 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqnf6\" (UniqueName: \"kubernetes.io/projected/772dfaf9-c4aa-4264-a15d-fbf19380f275-kube-api-access-pqnf6\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.759740 4647 scope.go:117] "RemoveContainer" containerID="b4f8b90f07d255767066b59f53509d216f91c0877ba902606434bace369b49e6" Nov 28 16:56:11 crc kubenswrapper[4647]: I1128 16:56:11.759759 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/crc-debug-njxx7" Nov 28 16:56:12 crc kubenswrapper[4647]: I1128 16:56:12.404355 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="772dfaf9-c4aa-4264-a15d-fbf19380f275" path="/var/lib/kubelet/pods/772dfaf9-c4aa-4264-a15d-fbf19380f275/volumes" Nov 28 16:56:46 crc kubenswrapper[4647]: I1128 16:56:46.500292 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-8f8ccd5d4-thgh2_cad9ee6e-4bee-49c6-9f24-7c97e6e745ed/barbican-api/0.log" Nov 28 16:56:46 crc kubenswrapper[4647]: I1128 16:56:46.689619 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-8f8ccd5d4-thgh2_cad9ee6e-4bee-49c6-9f24-7c97e6e745ed/barbican-api-log/0.log" Nov 28 16:56:46 crc kubenswrapper[4647]: I1128 16:56:46.736176 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68d4467b78-mhh9d_a70c88e0-3df8-484f-8343-2bf87f6c9f33/barbican-keystone-listener/0.log" Nov 28 16:56:46 crc kubenswrapper[4647]: I1128 16:56:46.888270 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68d4467b78-mhh9d_a70c88e0-3df8-484f-8343-2bf87f6c9f33/barbican-keystone-listener-log/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.050397 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9c6c77dd7-h6hcl_52ae86fe-ca94-41f0-880a-d957edd96160/barbican-worker/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.169688 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-9c6c77dd7-h6hcl_52ae86fe-ca94-41f0-880a-d957edd96160/barbican-worker-log/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.235941 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6q96s_60d6a4cd-44ae-46ff-a980-a81ddab3b98c/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.546825 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/proxy-httpd/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.647681 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/ceilometer-notification-agent/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.657461 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/ceilometer-central-agent/0.log" Nov 28 16:56:47 crc kubenswrapper[4647]: I1128 16:56:47.765240 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_aa9888f9-1fbf-439a-978c-77b304679edf/sg-core/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: 
I1128 16:56:48.107232 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0f64718e-70bf-4d38-8c02-0523053f5e99/cinder-api/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.148763 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_0f64718e-70bf-4d38-8c02-0523053f5e99/cinder-api-log/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.348349 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_a6eb59e0-a2aa-49d0-a662-8e37f51004ef/cinder-scheduler/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.405679 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_a6eb59e0-a2aa-49d0-a662-8e37f51004ef/probe/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.450576 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-flpg5_77715da8-feee-451f-a972-a2e52884582a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.797193 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-vk8vb_a8d3c439-b6ee-42bd-96d2-eb725c996b97/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:48 crc kubenswrapper[4647]: I1128 16:56:48.915351 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/init/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.102257 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/init/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.128224 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-q5g9z_aa17a444-8971-4590-a7a5-9d303c00b90e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.350461 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-667c9c995c-qp6ls_c06de7d8-7b65-4a7b-876c-0049182a2ec0/dnsmasq-dns/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.471675 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_cf5f0db0-688a-43f4-b38e-8478858003fa/glance-httpd/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.652427 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_cf5f0db0-688a-43f4-b38e-8478858003fa/glance-log/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.699838 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38dcdbad-1599-4387-8587-6676317adbc3/glance-httpd/0.log" Nov 28 16:56:49 crc kubenswrapper[4647]: I1128 16:56:49.734028 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38dcdbad-1599-4387-8587-6676317adbc3/glance-log/0.log" Nov 28 16:56:50 crc kubenswrapper[4647]: I1128 16:56:50.004080 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon/2.log" Nov 28 16:56:50 crc kubenswrapper[4647]: I1128 16:56:50.151008 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon/1.log" Nov 28 16:56:50 crc kubenswrapper[4647]: I1128 16:56:50.464504 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-d78nq_4cb1ba80-4e50-4753-9887-3e420c825d2a/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:50 crc kubenswrapper[4647]: I1128 16:56:50.598574 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-66c6c46cdb-xgv7h_278aef39-0aaf-4d33-b167-0f0cca8248fd/horizon-log/0.log" Nov 28 16:56:50 crc kubenswrapper[4647]: I1128 16:56:50.673327 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-nklbp_cbde4c89-6c68-4422-931b-94507dc5376d/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:51 crc kubenswrapper[4647]: I1128 16:56:51.013664 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405761-4rc8f_03bc8de1-2028-4b26-bf81-c51d09cf6a71/keystone-cron/0.log" Nov 28 16:56:51 crc kubenswrapper[4647]: I1128 16:56:51.200771 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-8567d4b5-5ss7w_317b992c-8c2d-4838-bfbf-6debefd73d0a/keystone-api/0.log" Nov 28 16:56:51 crc kubenswrapper[4647]: I1128 16:56:51.280436 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_bbd46dc1-09cb-44e2-8150-a1f512a3efc9/kube-state-metrics/0.log" Nov 28 16:56:51 crc kubenswrapper[4647]: I1128 16:56:51.445317 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-vlbt5_93e3bb5f-ef6f-44de-9f2c-aa13871df572/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:52 crc kubenswrapper[4647]: I1128 16:56:52.086383 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-knlgc_1180d1cb-f9bc-4646-864d-0bdea17fd99f/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:52 crc kubenswrapper[4647]: I1128 16:56:52.227511 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-76d7749889-z87rt_8e65fe60-2d61-4066-aed7-6e211c8f2096/neutron-httpd/0.log" Nov 28 16:56:52 crc kubenswrapper[4647]: I1128 16:56:52.340799 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-76d7749889-z87rt_8e65fe60-2d61-4066-aed7-6e211c8f2096/neutron-api/0.log" Nov 28 16:56:53 crc kubenswrapper[4647]: I1128 16:56:53.039056 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_29b2bc60-27b4-48ee-b7d5-39a9c9648c03/nova-cell0-conductor-conductor/0.log" Nov 28 16:56:53 crc kubenswrapper[4647]: I1128 16:56:53.298806 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_a641c2b8-f7d8-4829-8d49-d8eff2e2d132/nova-cell1-conductor-conductor/0.log" Nov 28 16:56:53 crc kubenswrapper[4647]: I1128 16:56:53.836524 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ee715495-598f-4c76-9399-92846d682bbe/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 16:56:53 crc kubenswrapper[4647]: I1128 16:56:53.923521 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4d763768-9218-4866-94d2-1197f5e81fce/nova-api-log/0.log" Nov 28 16:56:53 crc kubenswrapper[4647]: I1128 16:56:53.955149 4647 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-p69xl_338f9128-79ea-4cda-b4e8-7664e6057225/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:54 crc kubenswrapper[4647]: I1128 16:56:54.335688 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f/nova-metadata-log/0.log" Nov 28 16:56:54 crc kubenswrapper[4647]: I1128 16:56:54.477571 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_4d763768-9218-4866-94d2-1197f5e81fce/nova-api-api/0.log" Nov 28 16:56:54 crc kubenswrapper[4647]: I1128 16:56:54.721316 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/mysql-bootstrap/0.log" Nov 28 16:56:54 crc kubenswrapper[4647]: I1128 16:56:54.995537 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/mysql-bootstrap/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.037872 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0f876088-07c2-4cb0-8096-681aaf594d6a/galera/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.134067 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7597bf4e-3cd6-4adb-8723-8a86aaf60a05/nova-scheduler-scheduler/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.330191 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/mysql-bootstrap/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.596058 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/mysql-bootstrap/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.607298 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_63a87633-1166-4787-99ee-ec4a5fd02b87/galera/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.812100 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_c765a2ba-ed3c-471b-8794-1623c126f0f2/openstackclient/0.log" Nov 28 16:56:55 crc kubenswrapper[4647]: I1128 16:56:55.928251 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-4psvg_fab755a9-f20f-4bc6-a7e2-353396a0ce74/ovn-controller/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.181811 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-zql68_083c881c-8e40-4d03-b4f1-91af7bcd2cd1/openstack-network-exporter/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.452601 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server-init/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.551726 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fe41f81d-e4cf-4c08-a00f-c1f41eb6d04f/nova-metadata-metadata/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.616478 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server-init/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.699170 4647 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovsdb-server/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.756500 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vh9b5_a5d1c6f3-2c4f-4046-81fc-c2e210100c4b/ovs-vswitchd/0.log" Nov 28 16:56:56 crc kubenswrapper[4647]: I1128 16:56:56.911725 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-rrkrm_7123f6db-1e1e-4bfb-97ca-f142f6cdb13a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.054750 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5edb07c-df1f-4434-8608-97841a748dd2/openstack-network-exporter/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.131492 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f5edb07c-df1f-4434-8608-97841a748dd2/ovn-northd/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.312127 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_de3257de-cbde-4dca-89c7-21af1617cc66/ovsdbserver-nb/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.394934 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_de3257de-cbde-4dca-89c7-21af1617cc66/openstack-network-exporter/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.480057 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0/openstack-network-exporter/0.log" Nov 28 16:56:57 crc kubenswrapper[4647]: I1128 16:56:57.565460 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_cbf8b62e-4172-48bb-aeb8-a3f6e6cffdb0/ovsdbserver-sb/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:57.993265 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6565667588-kf4hg_cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407/placement-api/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.060372 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/setup-container/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.100523 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6565667588-kf4hg_cf1f6b4e-174b-4f54-8f1c-1c55cb9b3407/placement-log/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.255735 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/setup-container/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.303849 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_271d0057-21bf-4899-9284-d8d2beb015b6/rabbitmq/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.474381 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/setup-container/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.595611 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/setup-container/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.741647 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_50660b1a-39a9-4ada-a275-a068d6b406bf/rabbitmq/0.log" Nov 28 16:56:58 crc kubenswrapper[4647]: I1128 16:56:58.826185 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-lzh8l_8cfdffa4-b728-4135-a613-7198ffda163d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.055981 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-f5vtp_ee739c78-f4a0-46eb-a0ca-a7bcab007c16/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.117941 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-hjmk9_54d11771-921e-4086-a17f-c853026c4a3e/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.333617 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-tkvrh_965d18f9-ce83-44f5-8ec7-4b13eefa7e30/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.463676 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-k8pmm_e27e0d42-24a3-447f-aa49-fc305e1253c0/ssh-known-hosts-edpm-deployment/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.779535 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-dj6b9_b098c625-f531-4a3a-8532-fbfc7cd4f236/swift-ring-rebalance/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.796010 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f577f58dc-7rp75_c5168b52-c295-45d6-aa36-932b5bb95a97/proxy-server/0.log" Nov 28 16:56:59 crc kubenswrapper[4647]: I1128 16:56:59.934581 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f577f58dc-7rp75_c5168b52-c295-45d6-aa36-932b5bb95a97/proxy-httpd/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.084072 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-auditor/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.128054 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-reaper/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.345204 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-server/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.355372 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/account-replicator/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.402848 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-auditor/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.470844 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-replicator/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.549525 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-server/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.667614 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/container-updater/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.713511 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-auditor/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.782509 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-expirer/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.908562 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-replicator/0.log" Nov 28 16:57:00 crc kubenswrapper[4647]: I1128 16:57:00.975917 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-updater/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.018311 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/object-server/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.086709 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/rsync/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.155140 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_ab7ffd14-cb79-40b8-854d-1dd1deca75f2/swift-recon-cron/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.378162 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-wghtm_c1d8e071-fad7-4b8d-8637-e7be304c4c86/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.503241 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_5f2af68b-575d-469b-ab8d-7f16dfadc0d7/tempest-tests-tempest-tests-runner/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.701765 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_ab53bb1b-cb66-4ef6-ae17-a4aab0e7ec99/test-operator-logs-container/0.log" Nov 28 16:57:01 crc kubenswrapper[4647]: I1128 16:57:01.794839 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-4f8rv_123e6be6-0c87-4a2b-8f94-ae8207ccbaa5/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 16:57:02 crc kubenswrapper[4647]: I1128 16:57:02.308600 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_b7a9cfb5-c9cd-45ee-906e-70926173aa87/memcached/0.log" Nov 28 16:57:17 crc kubenswrapper[4647]: I1128 16:57:17.022611 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:57:17 crc kubenswrapper[4647]: I1128 16:57:17.023018 4647 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:57:27 crc kubenswrapper[4647]: I1128 16:57:27.932904 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.170198 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.176775 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.220918 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.370387 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/util/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.392685 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/pull/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.436308 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4582fw_efdd2a38-2a54-411c-8463-3adc8f3cd634/extract/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.592725 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-qbdbz_c27f5305-5c04-401d-b53e-ca2df0999cfd/kube-rbac-proxy/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.726260 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-qbdbz_c27f5305-5c04-401d-b53e-ca2df0999cfd/manager/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.773609 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-s88h5_205cebfd-f183-486f-965f-ab494cae35dd/kube-rbac-proxy/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.886772 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-s88h5_205cebfd-f183-486f-965f-ab494cae35dd/manager/0.log" Nov 28 16:57:28 crc kubenswrapper[4647]: I1128 16:57:28.993815 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-687dh_022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619/manager/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.016761 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-687dh_022ea8dc-26d4-4f7f-bbf0-0b5d4bc58619/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.220952 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-kdwxt_62caff02-44e5-4ae9-8879-e588e2ec2c26/manager/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.230202 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-kdwxt_62caff02-44e5-4ae9-8879-e588e2ec2c26/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.392668 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-cpbpp_2698b76b-928c-4d48-bf4e-e03df478867a/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.434135 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-cpbpp_2698b76b-928c-4d48-bf4e-e03df478867a/manager/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.545169 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-w2446_43fb88ed-c57b-412e-a210-49ce2e7f8848/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.655913 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-w2446_43fb88ed-c57b-412e-a210-49ce2e7f8848/manager/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.718649 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-v9m4v_93230429-04c5-45a9-81c5-dab4213025d4/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.966798 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-vl88c_6576f1dc-a847-446f-a228-d287036b2d56/kube-rbac-proxy/0.log" Nov 28 16:57:29 crc kubenswrapper[4647]: I1128 16:57:29.990561 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-v9m4v_93230429-04c5-45a9-81c5-dab4213025d4/manager/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.076499 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-vl88c_6576f1dc-a847-446f-a228-d287036b2d56/manager/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.247567 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-x2ggd_a4c112f9-f801-4aec-b715-72b336978342/manager/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.261128 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-x2ggd_a4c112f9-f801-4aec-b715-72b336978342/kube-rbac-proxy/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.517584 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-jnnvb_7ff4e4d2-ff33-484b-bc15-f0192f009688/manager/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.553253 4647 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-jnnvb_7ff4e4d2-ff33-484b-bc15-f0192f009688/kube-rbac-proxy/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.585100 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-4tgc6_d32856fc-f28a-4e36-9e9b-0d09486b8a09/kube-rbac-proxy/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.755036 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-4tgc6_d32856fc-f28a-4e36-9e9b-0d09486b8a09/manager/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.795733 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-d9tcm_7760330a-6914-44a7-9fa5-aa6e6478506a/kube-rbac-proxy/0.log" Nov 28 16:57:30 crc kubenswrapper[4647]: I1128 16:57:30.843630 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-d9tcm_7760330a-6914-44a7-9fa5-aa6e6478506a/manager/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.056099 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-8szbq_f71ac6d8-b917-43a7-a35c-dce863f16280/manager/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.119150 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-8szbq_f71ac6d8-b917-43a7-a35c-dce863f16280/kube-rbac-proxy/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.317034 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-5992m_6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6/kube-rbac-proxy/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.318167 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-5992m_6d6d3f2a-5aa3-4f5f-89c6-2d807c55e6d6/manager/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.333930 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-zg7rq_873a1114-80f7-43f8-b6de-b69a7a152411/kube-rbac-proxy/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.495478 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-zg7rq_873a1114-80f7-43f8-b6de-b69a7a152411/manager/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.592231 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-qvs9j_a57f869c-1d71-4341-a632-870e7b3dfede/kube-rbac-proxy/0.log" Nov 28 16:57:31 crc kubenswrapper[4647]: I1128 16:57:31.887183 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-pt9fz_c8f72013-5d98-4478-bdd9-180abb82af2c/kube-rbac-proxy/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.110168 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-pt9fz_c8f72013-5d98-4478-bdd9-180abb82af2c/operator/0.log" Nov 28 16:57:32 crc 
kubenswrapper[4647]: I1128 16:57:32.156871 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-vtdpv_76ab5951-89d3-4ad1-8b6b-26982de63912/registry-server/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.446503 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-2v9x8_04af16a5-b153-433f-9c39-859c16167b0f/kube-rbac-proxy/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.556486 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-2v9x8_04af16a5-b153-433f-9c39-859c16167b0f/manager/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.727690 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-pzkc9_c2af1d24-9d02-4f14-95b7-3875382cb095/kube-rbac-proxy/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.773850 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-pzkc9_c2af1d24-9d02-4f14-95b7-3875382cb095/manager/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.892778 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-qvs9j_a57f869c-1d71-4341-a632-870e7b3dfede/manager/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.952134 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-kvck7_1a8f857d-6498-42ba-bbc5-2bb5b2896c6e/operator/0.log" Nov 28 16:57:32 crc kubenswrapper[4647]: I1128 16:57:32.974818 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zqz4k_4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f/kube-rbac-proxy/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.152384 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-zqz4k_4bc1b1a9-4da1-4778-9aee-ade9fbe4a01f/manager/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.176630 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-rk4n7_5f72c046-071d-4e1c-8e12-6574bed76f27/kube-rbac-proxy/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.230677 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-rk4n7_5f72c046-071d-4e1c-8e12-6574bed76f27/manager/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.395521 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-9dc64_577fb6a4-bb39-4df2-b161-04b2ac2f44d4/kube-rbac-proxy/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.443642 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-9dc64_577fb6a4-bb39-4df2-b161-04b2ac2f44d4/manager/0.log" Nov 28 16:57:33 crc kubenswrapper[4647]: I1128 16:57:33.456445 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-f4jfb_93200d81-c9c3-4d5e-8406-112eef462119/kube-rbac-proxy/0.log" Nov 28 16:57:33 crc 
kubenswrapper[4647]: I1128 16:57:33.552109 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-f4jfb_93200d81-c9c3-4d5e-8406-112eef462119/manager/0.log" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.558492 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:37 crc kubenswrapper[4647]: E1128 16:57:37.559465 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772dfaf9-c4aa-4264-a15d-fbf19380f275" containerName="container-00" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.559481 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="772dfaf9-c4aa-4264-a15d-fbf19380f275" containerName="container-00" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.559694 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="772dfaf9-c4aa-4264-a15d-fbf19380f275" containerName="container-00" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.561084 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.596886 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.623871 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.623926 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.623974 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6d89\" (UniqueName: \"kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.725436 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.725490 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.725536 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-q6d89\" (UniqueName: \"kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.725994 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.726049 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.747178 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6d89\" (UniqueName: \"kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89\") pod \"certified-operators-5v478\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:37 crc kubenswrapper[4647]: I1128 16:57:37.915303 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:38 crc kubenswrapper[4647]: I1128 16:57:38.419683 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:38 crc kubenswrapper[4647]: I1128 16:57:38.644521 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerStarted","Data":"d763c9dbd721405aee203df3a4a28e14a49c39349e08b879c43c983deb2e7140"} Nov 28 16:57:39 crc kubenswrapper[4647]: I1128 16:57:39.653573 4647 generic.go:334] "Generic (PLEG): container finished" podID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerID="80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b" exitCode=0 Nov 28 16:57:39 crc kubenswrapper[4647]: I1128 16:57:39.653932 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerDied","Data":"80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b"} Nov 28 16:57:39 crc kubenswrapper[4647]: I1128 16:57:39.656192 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:57:40 crc kubenswrapper[4647]: I1128 16:57:40.663730 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerStarted","Data":"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae"} Nov 28 16:57:41 crc kubenswrapper[4647]: I1128 16:57:41.675475 4647 generic.go:334] "Generic (PLEG): container finished" podID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerID="48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae" exitCode=0 Nov 28 16:57:41 crc kubenswrapper[4647]: I1128 16:57:41.675564 4647 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerDied","Data":"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae"} Nov 28 16:57:43 crc kubenswrapper[4647]: I1128 16:57:43.692885 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerStarted","Data":"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46"} Nov 28 16:57:43 crc kubenswrapper[4647]: I1128 16:57:43.722754 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5v478" podStartSLOduration=3.753798592 podStartE2EDuration="6.722739082s" podCreationTimestamp="2025-11-28 16:57:37 +0000 UTC" firstStartedPulling="2025-11-28 16:57:39.655980169 +0000 UTC m=+5589.503586590" lastFinishedPulling="2025-11-28 16:57:42.624920659 +0000 UTC m=+5592.472527080" observedRunningTime="2025-11-28 16:57:43.717732218 +0000 UTC m=+5593.565338639" watchObservedRunningTime="2025-11-28 16:57:43.722739082 +0000 UTC m=+5593.570345493" Nov 28 16:57:47 crc kubenswrapper[4647]: I1128 16:57:47.023399 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:57:47 crc kubenswrapper[4647]: I1128 16:57:47.023824 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:57:47 crc kubenswrapper[4647]: I1128 16:57:47.916095 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:47 crc kubenswrapper[4647]: I1128 16:57:47.916228 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:47 crc kubenswrapper[4647]: I1128 16:57:47.971336 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:48 crc kubenswrapper[4647]: I1128 16:57:48.802765 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:48 crc kubenswrapper[4647]: I1128 16:57:48.865391 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:50 crc kubenswrapper[4647]: I1128 16:57:50.760092 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5v478" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="registry-server" containerID="cri-o://1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46" gracePeriod=2 Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.289043 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.338870 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content\") pod \"cc292d91-b617-4c19-9cba-41b0906dfdd5\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.338956 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities\") pod \"cc292d91-b617-4c19-9cba-41b0906dfdd5\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.339030 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6d89\" (UniqueName: \"kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89\") pod \"cc292d91-b617-4c19-9cba-41b0906dfdd5\" (UID: \"cc292d91-b617-4c19-9cba-41b0906dfdd5\") " Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.340165 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities" (OuterVolumeSpecName: "utilities") pod "cc292d91-b617-4c19-9cba-41b0906dfdd5" (UID: "cc292d91-b617-4c19-9cba-41b0906dfdd5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.349655 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89" (OuterVolumeSpecName: "kube-api-access-q6d89") pod "cc292d91-b617-4c19-9cba-41b0906dfdd5" (UID: "cc292d91-b617-4c19-9cba-41b0906dfdd5"). InnerVolumeSpecName "kube-api-access-q6d89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.430256 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc292d91-b617-4c19-9cba-41b0906dfdd5" (UID: "cc292d91-b617-4c19-9cba-41b0906dfdd5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.444512 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.444556 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc292d91-b617-4c19-9cba-41b0906dfdd5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.444571 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6d89\" (UniqueName: \"kubernetes.io/projected/cc292d91-b617-4c19-9cba-41b0906dfdd5-kube-api-access-q6d89\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.669965 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-5gqvz_4a2a1306-2eff-4fc2-ac8c-8bb461353abd/control-plane-machine-set-operator/0.log" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.773269 4647 generic.go:334] "Generic (PLEG): container finished" podID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerID="1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46" exitCode=0 Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.773326 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerDied","Data":"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46"} Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.773386 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5v478" event={"ID":"cc292d91-b617-4c19-9cba-41b0906dfdd5","Type":"ContainerDied","Data":"d763c9dbd721405aee203df3a4a28e14a49c39349e08b879c43c983deb2e7140"} Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.773422 4647 scope.go:117] "RemoveContainer" containerID="1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.773436 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5v478" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.793393 4647 scope.go:117] "RemoveContainer" containerID="48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.839770 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.840762 4647 scope.go:117] "RemoveContainer" containerID="80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.856067 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5v478"] Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.865340 4647 scope.go:117] "RemoveContainer" containerID="1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46" Nov 28 16:57:51 crc kubenswrapper[4647]: E1128 16:57:51.866406 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46\": container with ID starting with 1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46 not found: ID does not exist" containerID="1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.866552 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46"} err="failed to get container status \"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46\": rpc error: code = NotFound desc = could not find container \"1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46\": container with ID starting with 1d0739642a64842fa119272a34cc101cb4f567e048ef57b301b1c59c96359c46 not found: ID does not exist" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.866650 4647 scope.go:117] "RemoveContainer" containerID="48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae" Nov 28 16:57:51 crc kubenswrapper[4647]: E1128 16:57:51.867053 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae\": container with ID starting with 48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae not found: ID does not exist" containerID="48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.867095 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae"} err="failed to get container status \"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae\": rpc error: code = NotFound desc = could not find container \"48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae\": container with ID starting with 48762c8ae89a4407a85c35695c5755ed768c7bac4d466f0bf0bcc1a3d50f13ae not found: ID does not exist" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.867125 4647 scope.go:117] "RemoveContainer" containerID="80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b" Nov 28 16:57:51 crc kubenswrapper[4647]: E1128 16:57:51.867436 4647 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b\": container with ID starting with 80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b not found: ID does not exist" containerID="80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.867516 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b"} err="failed to get container status \"80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b\": rpc error: code = NotFound desc = could not find container \"80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b\": container with ID starting with 80595b3d525c4e059f14b9b1eba32e8383f4860a1860d2e1ec6a050666623c3b not found: ID does not exist" Nov 28 16:57:51 crc kubenswrapper[4647]: I1128 16:57:51.955662 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6t8hg_4408688c-7115-4338-9b06-e30b0ed30399/kube-rbac-proxy/0.log" Nov 28 16:57:52 crc kubenswrapper[4647]: I1128 16:57:52.023317 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6t8hg_4408688c-7115-4338-9b06-e30b0ed30399/machine-api-operator/0.log" Nov 28 16:57:52 crc kubenswrapper[4647]: I1128 16:57:52.407661 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" path="/var/lib/kubelet/pods/cc292d91-b617-4c19-9cba-41b0906dfdd5/volumes" Nov 28 16:58:05 crc kubenswrapper[4647]: I1128 16:58:05.701031 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-s96fs_7e8c6733-5ed1-46be-b533-e3b03a586fd5/cert-manager-controller/0.log" Nov 28 16:58:05 crc kubenswrapper[4647]: I1128 16:58:05.868699 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-6dlfq_15cf6e95-03c8-49db-9029-2fd5f51e14c1/cert-manager-cainjector/0.log" Nov 28 16:58:06 crc kubenswrapper[4647]: I1128 16:58:06.001170 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-7qjfk_24afbb10-6dd7-4492-9340-287a2b45d450/cert-manager-webhook/0.log" Nov 28 16:58:17 crc kubenswrapper[4647]: I1128 16:58:17.022708 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:58:17 crc kubenswrapper[4647]: I1128 16:58:17.023404 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:58:17 crc kubenswrapper[4647]: I1128 16:58:17.023536 4647 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" Nov 28 16:58:17 crc kubenswrapper[4647]: I1128 16:58:17.024662 4647 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea"} pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:58:17 crc kubenswrapper[4647]: I1128 16:58:17.024765 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" containerID="cri-o://5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" gracePeriod=600 Nov 28 16:58:17 crc kubenswrapper[4647]: E1128 16:58:17.154670 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:58:18 crc kubenswrapper[4647]: I1128 16:58:18.004124 4647 generic.go:334] "Generic (PLEG): container finished" podID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" exitCode=0 Nov 28 16:58:18 crc kubenswrapper[4647]: I1128 16:58:18.004505 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerDied","Data":"5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea"} Nov 28 16:58:18 crc kubenswrapper[4647]: I1128 16:58:18.004541 4647 scope.go:117] "RemoveContainer" containerID="b1ee664078d688ed8630f5b377418d63110b6bb1129ca815ab136e42057bf59b" Nov 28 16:58:18 crc kubenswrapper[4647]: I1128 16:58:18.005223 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:58:18 crc kubenswrapper[4647]: E1128 16:58:18.005498 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.069055 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-njhtz_247287a6-7c4f-4dae-ab2e-9e9d144fcdd4/nmstate-console-plugin/0.log" Nov 28 16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.192957 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-2vmmg_e0476ba4-d83b-4a10-9898-fe3b6b05f76e/nmstate-handler/0.log" Nov 28 16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.282526 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-wxjgv_6b13a440-59b0-440a-bcf5-164d5f29ceba/kube-rbac-proxy/0.log" Nov 28 16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.355002 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-wxjgv_6b13a440-59b0-440a-bcf5-164d5f29ceba/nmstate-metrics/0.log" Nov 28 
16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.531308 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-2k6zn_22b9216f-dccd-4cc4-ac15-770a5edc610b/nmstate-operator/0.log" Nov 28 16:58:19 crc kubenswrapper[4647]: I1128 16:58:19.571240 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-fs4lf_8110d8bf-8ce5-415f-857c-6a89c9729b32/nmstate-webhook/0.log" Nov 28 16:58:28 crc kubenswrapper[4647]: I1128 16:58:28.394327 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:58:28 crc kubenswrapper[4647]: E1128 16:58:28.394938 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:58:34 crc kubenswrapper[4647]: I1128 16:58:34.574216 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xqlc5_c4214344-1c2e-48f0-a1cb-c0a0414c8e77/controller/0.log" Nov 28 16:58:34 crc kubenswrapper[4647]: I1128 16:58:34.579621 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xqlc5_c4214344-1c2e-48f0-a1cb-c0a0414c8e77/kube-rbac-proxy/0.log" Nov 28 16:58:34 crc kubenswrapper[4647]: I1128 16:58:34.823337 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.005544 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.051818 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.076766 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.077405 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.267253 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.279379 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.285289 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.302339 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 
16:58:35.514530 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-frr-files/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.521896 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-metrics/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.599167 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/controller/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.604912 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/cp-reloader/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.753025 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/frr-metrics/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.843571 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/kube-rbac-proxy/0.log" Nov 28 16:58:35 crc kubenswrapper[4647]: I1128 16:58:35.895749 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/kube-rbac-proxy-frr/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.016091 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/reloader/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.126445 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-74jft_6850e82e-6e47-4f7e-a861-aa1e2f29b468/frr-k8s-webhook-server/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.358977 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6b8f4b57d8-9nmkq_660b76dc-783c-4df5-938b-7df9e2af467a/manager/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.658054 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6846cd54fc-hhsz2_a68e1898-f4f8-468a-98fc-03e4f01397e4/webhook-server/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.850252 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2csd4_8ee0a7ea-967a-457c-9d3b-1eb46c99b719/kube-rbac-proxy/0.log" Nov 28 16:58:36 crc kubenswrapper[4647]: I1128 16:58:36.980183 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vnbr8_dc709627-0843-4e8f-8485-5ac40ec5b457/frr/0.log" Nov 28 16:58:37 crc kubenswrapper[4647]: I1128 16:58:37.242561 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2csd4_8ee0a7ea-967a-457c-9d3b-1eb46c99b719/speaker/0.log" Nov 28 16:58:43 crc kubenswrapper[4647]: I1128 16:58:43.394503 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:58:43 crc kubenswrapper[4647]: E1128 16:58:43.395460 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.035123 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.318572 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.320580 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.374150 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.557592 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/util/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.608253 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/pull/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.617585 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftbrw8_6b13d8bc-0908-4b22-8a7e-ac2d3d4e0618/extract/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.732917 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.885146 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.904837 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log" Nov 28 16:58:50 crc kubenswrapper[4647]: I1128 16:58:50.905989 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.112871 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/util/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.150797 4647 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/pull/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.163953 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83k2n2k_df1a3745-7401-4dbd-be91-533022effe1a/extract/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.341072 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.536295 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.579984 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.590051 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.785146 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-content/0.log" Nov 28 16:58:51 crc kubenswrapper[4647]: I1128 16:58:51.940974 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/extract-utilities/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.071704 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.363698 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.369330 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.489120 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.522443 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbztr_b124cbca-8cc0-4bc6-9870-fa348da63a06/registry-server/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.737848 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-utilities/0.log" Nov 28 16:58:52 crc kubenswrapper[4647]: I1128 16:58:52.738459 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/extract-content/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.035667 4647 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/1.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.108994 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ql2cs_5b489703-e3e6-4ef2-b993-766bd6e12094/marketplace-operator/2.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.395524 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5dk2w_f7fc41cd-7d94-436d-9c27-bf868f6b7212/registry-server/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.436533 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.579752 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.616573 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.624113 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.880768 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-utilities/0.log" Nov 28 16:58:53 crc kubenswrapper[4647]: I1128 16:58:53.899993 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/extract-content/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.032770 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rgvlr_fbde6f92-55d8-482f-9dac-9b40fbae6c53/registry-server/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.175236 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.366164 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.387498 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.403771 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.578325 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-content/0.log" Nov 28 16:58:54 crc kubenswrapper[4647]: I1128 16:58:54.601481 4647 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/extract-utilities/0.log" Nov 28 16:58:55 crc kubenswrapper[4647]: I1128 16:58:55.120005 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-8s5cf_e3671234-ff46-40ec-95b4-c5dd9192ed13/registry-server/0.log" Nov 28 16:58:57 crc kubenswrapper[4647]: I1128 16:58:57.395085 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:58:57 crc kubenswrapper[4647]: E1128 16:58:57.395638 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:59:08 crc kubenswrapper[4647]: I1128 16:59:08.394238 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:59:08 crc kubenswrapper[4647]: E1128 16:59:08.395078 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:59:20 crc kubenswrapper[4647]: I1128 16:59:20.399913 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:59:20 crc kubenswrapper[4647]: E1128 16:59:20.400739 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:59:35 crc kubenswrapper[4647]: I1128 16:59:35.394367 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:59:35 crc kubenswrapper[4647]: E1128 16:59:35.395121 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 16:59:49 crc kubenswrapper[4647]: I1128 16:59:49.395372 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 16:59:49 crc kubenswrapper[4647]: E1128 16:59:49.396576 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.164311 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z"] Nov 28 17:00:00 crc kubenswrapper[4647]: E1128 17:00:00.166023 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.166065 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4647]: E1128 17:00:00.166102 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.166112 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4647]: E1128 17:00:00.166119 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.166126 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.166399 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc292d91-b617-4c19-9cba-41b0906dfdd5" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.167178 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.172078 4647 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.172520 4647 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.175826 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z"] Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.218391 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.218542 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl8t4\" (UniqueName: \"kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.218571 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.324497 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl8t4\" (UniqueName: \"kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.324583 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.324728 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.326672 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume\") pod 
\"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.334098 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.342318 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl8t4\" (UniqueName: \"kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4\") pod \"collect-profiles-29405820-rv95z\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.400831 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:00:00 crc kubenswrapper[4647]: E1128 17:00:00.401644 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:00:00 crc kubenswrapper[4647]: I1128 17:00:00.489091 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:01 crc kubenswrapper[4647]: I1128 17:00:01.031127 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z"] Nov 28 17:00:01 crc kubenswrapper[4647]: I1128 17:00:01.075151 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" event={"ID":"615a03f5-60f0-4f19-922f-0e3f6782c484","Type":"ContainerStarted","Data":"350554aae03519152b1e2bd13bc5ec691c2ca21ded505ade926c95ceb5309abb"} Nov 28 17:00:02 crc kubenswrapper[4647]: I1128 17:00:02.092471 4647 generic.go:334] "Generic (PLEG): container finished" podID="615a03f5-60f0-4f19-922f-0e3f6782c484" containerID="8f55d824418dc9b99968aa3c6ea32b36dd181c39f1b411c7bbbcfcc8aafcdb69" exitCode=0 Nov 28 17:00:02 crc kubenswrapper[4647]: I1128 17:00:02.092522 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" event={"ID":"615a03f5-60f0-4f19-922f-0e3f6782c484","Type":"ContainerDied","Data":"8f55d824418dc9b99968aa3c6ea32b36dd181c39f1b411c7bbbcfcc8aafcdb69"} Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.398560 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.518651 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume\") pod \"615a03f5-60f0-4f19-922f-0e3f6782c484\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.518864 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl8t4\" (UniqueName: \"kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4\") pod \"615a03f5-60f0-4f19-922f-0e3f6782c484\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.518967 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume\") pod \"615a03f5-60f0-4f19-922f-0e3f6782c484\" (UID: \"615a03f5-60f0-4f19-922f-0e3f6782c484\") " Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.519811 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume" (OuterVolumeSpecName: "config-volume") pod "615a03f5-60f0-4f19-922f-0e3f6782c484" (UID: "615a03f5-60f0-4f19-922f-0e3f6782c484"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.525859 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "615a03f5-60f0-4f19-922f-0e3f6782c484" (UID: "615a03f5-60f0-4f19-922f-0e3f6782c484"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.527455 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4" (OuterVolumeSpecName: "kube-api-access-xl8t4") pod "615a03f5-60f0-4f19-922f-0e3f6782c484" (UID: "615a03f5-60f0-4f19-922f-0e3f6782c484"). InnerVolumeSpecName "kube-api-access-xl8t4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.621474 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl8t4\" (UniqueName: \"kubernetes.io/projected/615a03f5-60f0-4f19-922f-0e3f6782c484-kube-api-access-xl8t4\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.621532 4647 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/615a03f5-60f0-4f19-922f-0e3f6782c484-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4647]: I1128 17:00:03.621545 4647 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/615a03f5-60f0-4f19-922f-0e3f6782c484-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:04 crc kubenswrapper[4647]: I1128 17:00:04.110319 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" event={"ID":"615a03f5-60f0-4f19-922f-0e3f6782c484","Type":"ContainerDied","Data":"350554aae03519152b1e2bd13bc5ec691c2ca21ded505ade926c95ceb5309abb"} Nov 28 17:00:04 crc kubenswrapper[4647]: I1128 17:00:04.110656 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="350554aae03519152b1e2bd13bc5ec691c2ca21ded505ade926c95ceb5309abb" Nov 28 17:00:04 crc kubenswrapper[4647]: I1128 17:00:04.110579 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-rv95z" Nov 28 17:00:04 crc kubenswrapper[4647]: I1128 17:00:04.476282 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj"] Nov 28 17:00:04 crc kubenswrapper[4647]: I1128 17:00:04.484929 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-9dzkj"] Nov 28 17:00:06 crc kubenswrapper[4647]: I1128 17:00:06.430215 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f968e9d-0276-495e-abc8-88f232ea1344" path="/var/lib/kubelet/pods/5f968e9d-0276-495e-abc8-88f232ea1344/volumes" Nov 28 17:00:15 crc kubenswrapper[4647]: I1128 17:00:15.394927 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:00:15 crc kubenswrapper[4647]: E1128 17:00:15.397962 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:00:20 crc kubenswrapper[4647]: I1128 17:00:20.477765 4647 scope.go:117] "RemoveContainer" containerID="65fe59caa46602b8417c214b0293302a7297165f4aa6e6241499809395859a76" Nov 28 17:00:20 crc kubenswrapper[4647]: I1128 17:00:20.499271 4647 scope.go:117] "RemoveContainer" containerID="80b25b44307a1e1d1bec3641d2aca649f81d704e9f4d55c06eaeff5c0be82c3a" Nov 28 17:00:20 crc kubenswrapper[4647]: I1128 17:00:20.530193 4647 scope.go:117] "RemoveContainer" containerID="85e1d88a43dc35e8080a2212b809ba3e46e8ab01d6d7b524cc37f01708126700" Nov 28 17:00:20 crc kubenswrapper[4647]: I1128 
17:00:20.600295 4647 scope.go:117] "RemoveContainer" containerID="fa992f2eb32b9a62bdb02773c52b1f32a9f1203889ba282cb9c3d245c78a0230" Nov 28 17:00:27 crc kubenswrapper[4647]: I1128 17:00:27.394272 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:00:27 crc kubenswrapper[4647]: E1128 17:00:27.395253 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:00:39 crc kubenswrapper[4647]: I1128 17:00:39.394439 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:00:39 crc kubenswrapper[4647]: E1128 17:00:39.395299 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:00:51 crc kubenswrapper[4647]: I1128 17:00:51.395952 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:00:51 crc kubenswrapper[4647]: E1128 17:00:51.396622 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.147734 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405821-kwnb9"] Nov 28 17:01:00 crc kubenswrapper[4647]: E1128 17:01:00.148897 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615a03f5-60f0-4f19-922f-0e3f6782c484" containerName="collect-profiles" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.148917 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="615a03f5-60f0-4f19-922f-0e3f6782c484" containerName="collect-profiles" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.149151 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="615a03f5-60f0-4f19-922f-0e3f6782c484" containerName="collect-profiles" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.149966 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.168406 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405821-kwnb9"] Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.308515 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.308598 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.308619 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.308758 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnb6r\" (UniqueName: \"kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.410390 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnb6r\" (UniqueName: \"kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.410794 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.410857 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.410880 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.416460 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.422382 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.432355 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.435167 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnb6r\" (UniqueName: \"kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r\") pod \"keystone-cron-29405821-kwnb9\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.472222 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:00 crc kubenswrapper[4647]: I1128 17:01:00.852896 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405821-kwnb9"] Nov 28 17:01:01 crc kubenswrapper[4647]: I1128 17:01:01.701578 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-kwnb9" event={"ID":"24506434-7396-4ca6-b0eb-333469a2280b","Type":"ContainerStarted","Data":"cb00815a1740cd9ea0d212b6576a098cea427a1c08984c3f42f0a45981905d1c"} Nov 28 17:01:01 crc kubenswrapper[4647]: I1128 17:01:01.703013 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-kwnb9" event={"ID":"24506434-7396-4ca6-b0eb-333469a2280b","Type":"ContainerStarted","Data":"5f3ff0f00ce36654113b00f3636c17bd004ebe283fd0ece78e3755b2ef9f0ff9"} Nov 28 17:01:01 crc kubenswrapper[4647]: I1128 17:01:01.723434 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405821-kwnb9" podStartSLOduration=1.723400238 podStartE2EDuration="1.723400238s" podCreationTimestamp="2025-11-28 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:01:01.716223776 +0000 UTC m=+5791.563830197" watchObservedRunningTime="2025-11-28 17:01:01.723400238 +0000 UTC m=+5791.571006659" Nov 28 17:01:02 crc kubenswrapper[4647]: I1128 17:01:02.394475 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:01:02 crc kubenswrapper[4647]: E1128 17:01:02.394786 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:01:04 crc kubenswrapper[4647]: I1128 17:01:04.731297 4647 generic.go:334] "Generic (PLEG): container finished" podID="24506434-7396-4ca6-b0eb-333469a2280b" containerID="cb00815a1740cd9ea0d212b6576a098cea427a1c08984c3f42f0a45981905d1c" exitCode=0 Nov 28 17:01:04 crc kubenswrapper[4647]: I1128 17:01:04.731839 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-kwnb9" event={"ID":"24506434-7396-4ca6-b0eb-333469a2280b","Type":"ContainerDied","Data":"cb00815a1740cd9ea0d212b6576a098cea427a1c08984c3f42f0a45981905d1c"} Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.193264 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.365144 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys\") pod \"24506434-7396-4ca6-b0eb-333469a2280b\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.366268 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnb6r\" (UniqueName: \"kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r\") pod \"24506434-7396-4ca6-b0eb-333469a2280b\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.366340 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle\") pod \"24506434-7396-4ca6-b0eb-333469a2280b\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.366377 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data\") pod \"24506434-7396-4ca6-b0eb-333469a2280b\" (UID: \"24506434-7396-4ca6-b0eb-333469a2280b\") " Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.370890 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "24506434-7396-4ca6-b0eb-333469a2280b" (UID: "24506434-7396-4ca6-b0eb-333469a2280b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.404785 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r" (OuterVolumeSpecName: "kube-api-access-bnb6r") pod "24506434-7396-4ca6-b0eb-333469a2280b" (UID: "24506434-7396-4ca6-b0eb-333469a2280b"). InnerVolumeSpecName "kube-api-access-bnb6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.415769 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24506434-7396-4ca6-b0eb-333469a2280b" (UID: "24506434-7396-4ca6-b0eb-333469a2280b"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.454728 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data" (OuterVolumeSpecName: "config-data") pod "24506434-7396-4ca6-b0eb-333469a2280b" (UID: "24506434-7396-4ca6-b0eb-333469a2280b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.468919 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnb6r\" (UniqueName: \"kubernetes.io/projected/24506434-7396-4ca6-b0eb-333469a2280b-kube-api-access-bnb6r\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.468957 4647 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.468968 4647 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.468978 4647 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/24506434-7396-4ca6-b0eb-333469a2280b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.749074 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-kwnb9" event={"ID":"24506434-7396-4ca6-b0eb-333469a2280b","Type":"ContainerDied","Data":"5f3ff0f00ce36654113b00f3636c17bd004ebe283fd0ece78e3755b2ef9f0ff9"} Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.749335 4647 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f3ff0f00ce36654113b00f3636c17bd004ebe283fd0ece78e3755b2ef9f0ff9" Nov 28 17:01:06 crc kubenswrapper[4647]: I1128 17:01:06.749146 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405821-kwnb9" Nov 28 17:01:15 crc kubenswrapper[4647]: I1128 17:01:15.394999 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:01:15 crc kubenswrapper[4647]: E1128 17:01:15.395915 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:01:16 crc kubenswrapper[4647]: I1128 17:01:16.849723 4647 generic.go:334] "Generic (PLEG): container finished" podID="3974b935-7762-4f74-a9ed-bda6ae385160" containerID="862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471" exitCode=0 Nov 28 17:01:16 crc kubenswrapper[4647]: I1128 17:01:16.849907 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" event={"ID":"3974b935-7762-4f74-a9ed-bda6ae385160","Type":"ContainerDied","Data":"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471"} Nov 28 17:01:16 crc kubenswrapper[4647]: I1128 17:01:16.850780 4647 scope.go:117] "RemoveContainer" containerID="862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471" Nov 28 17:01:17 crc kubenswrapper[4647]: I1128 17:01:17.620047 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jqx7c_must-gather-h2fw7_3974b935-7762-4f74-a9ed-bda6ae385160/gather/0.log" Nov 28 17:01:20 crc kubenswrapper[4647]: I1128 17:01:20.718897 4647 scope.go:117] "RemoveContainer" containerID="5f6c74f2a576d6183a39bd4d48e00e95610150f65ee55e7d6f054d74a2a7144c" Nov 28 17:01:26 crc kubenswrapper[4647]: I1128 17:01:26.395721 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:01:26 crc kubenswrapper[4647]: E1128 17:01:26.396492 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:01:33 crc kubenswrapper[4647]: I1128 17:01:33.982017 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jqx7c/must-gather-h2fw7"] Nov 28 17:01:33 crc kubenswrapper[4647]: I1128 17:01:33.982861 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="copy" containerID="cri-o://211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77" gracePeriod=2 Nov 28 17:01:33 crc kubenswrapper[4647]: I1128 17:01:33.992205 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jqx7c/must-gather-h2fw7"] Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.415283 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jqx7c_must-gather-h2fw7_3974b935-7762-4f74-a9ed-bda6ae385160/copy/0.log" Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 
17:01:34.415936 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.434693 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h88cs\" (UniqueName: \"kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs\") pod \"3974b935-7762-4f74-a9ed-bda6ae385160\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.434752 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output\") pod \"3974b935-7762-4f74-a9ed-bda6ae385160\" (UID: \"3974b935-7762-4f74-a9ed-bda6ae385160\") " Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.463652 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs" (OuterVolumeSpecName: "kube-api-access-h88cs") pod "3974b935-7762-4f74-a9ed-bda6ae385160" (UID: "3974b935-7762-4f74-a9ed-bda6ae385160"). InnerVolumeSpecName "kube-api-access-h88cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.536804 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h88cs\" (UniqueName: \"kubernetes.io/projected/3974b935-7762-4f74-a9ed-bda6ae385160-kube-api-access-h88cs\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.641961 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3974b935-7762-4f74-a9ed-bda6ae385160" (UID: "3974b935-7762-4f74-a9ed-bda6ae385160"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:34 crc kubenswrapper[4647]: I1128 17:01:34.740271 4647 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3974b935-7762-4f74-a9ed-bda6ae385160-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.059168 4647 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jqx7c_must-gather-h2fw7_3974b935-7762-4f74-a9ed-bda6ae385160/copy/0.log" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.060006 4647 generic.go:334] "Generic (PLEG): container finished" podID="3974b935-7762-4f74-a9ed-bda6ae385160" containerID="211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77" exitCode=143 Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.060060 4647 scope.go:117] "RemoveContainer" containerID="211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.060098 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jqx7c/must-gather-h2fw7" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.093514 4647 scope.go:117] "RemoveContainer" containerID="862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.138941 4647 scope.go:117] "RemoveContainer" containerID="211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77" Nov 28 17:01:35 crc kubenswrapper[4647]: E1128 17:01:35.139261 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77\": container with ID starting with 211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77 not found: ID does not exist" containerID="211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.139294 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77"} err="failed to get container status \"211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77\": rpc error: code = NotFound desc = could not find container \"211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77\": container with ID starting with 211d9e649fd3ba15c283bff6880e0f5a314415d09362ed450883d04286daec77 not found: ID does not exist" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.139318 4647 scope.go:117] "RemoveContainer" containerID="862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471" Nov 28 17:01:35 crc kubenswrapper[4647]: E1128 17:01:35.139635 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471\": container with ID starting with 862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471 not found: ID does not exist" containerID="862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471" Nov 28 17:01:35 crc kubenswrapper[4647]: I1128 17:01:35.139661 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471"} err="failed to get container status \"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471\": rpc error: code = NotFound desc = could not find container \"862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471\": container with ID starting with 862b06a830aced333137e642695c7a6269a45f612ed3539c11fa44f7c5232471 not found: ID does not exist" Nov 28 17:01:36 crc kubenswrapper[4647]: I1128 17:01:36.411879 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" path="/var/lib/kubelet/pods/3974b935-7762-4f74-a9ed-bda6ae385160/volumes" Nov 28 17:01:40 crc kubenswrapper[4647]: I1128 17:01:40.404696 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:01:40 crc kubenswrapper[4647]: E1128 17:01:40.405761 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:01:51 crc kubenswrapper[4647]: I1128 17:01:51.395205 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:01:51 crc kubenswrapper[4647]: E1128 17:01:51.396035 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:02:05 crc kubenswrapper[4647]: I1128 17:02:05.394378 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:02:05 crc kubenswrapper[4647]: E1128 17:02:05.395730 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:02:16 crc kubenswrapper[4647]: I1128 17:02:16.394305 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:02:16 crc kubenswrapper[4647]: E1128 17:02:16.394940 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:02:20 crc kubenswrapper[4647]: I1128 17:02:20.789249 4647 scope.go:117] "RemoveContainer" containerID="4856d40c6bc43034ee356d069c548b3dee7c528d4935d4e8cddbf67092e2138b" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.214868 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8hmrj"] Nov 28 17:02:28 crc kubenswrapper[4647]: E1128 17:02:28.216216 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24506434-7396-4ca6-b0eb-333469a2280b" containerName="keystone-cron" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216231 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="24506434-7396-4ca6-b0eb-333469a2280b" containerName="keystone-cron" Nov 28 17:02:28 crc kubenswrapper[4647]: E1128 17:02:28.216268 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="gather" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216274 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="gather" Nov 28 17:02:28 crc kubenswrapper[4647]: E1128 17:02:28.216285 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="copy" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216291 4647 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="copy" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216476 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="24506434-7396-4ca6-b0eb-333469a2280b" containerName="keystone-cron" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216496 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="copy" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.216510 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="3974b935-7762-4f74-a9ed-bda6ae385160" containerName="gather" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.217788 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.254208 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hmrj"] Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.358301 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-utilities\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.358386 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-catalog-content\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.358504 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsqlt\" (UniqueName: \"kubernetes.io/projected/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-kube-api-access-gsqlt\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.394852 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:02:28 crc kubenswrapper[4647]: E1128 17:02:28.395209 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.459850 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-catalog-content\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.460038 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsqlt\" (UniqueName: 
\"kubernetes.io/projected/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-kube-api-access-gsqlt\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.460114 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-utilities\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.460641 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-catalog-content\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.460918 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-utilities\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.484261 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsqlt\" (UniqueName: \"kubernetes.io/projected/6e699750-e91e-4ec4-8700-b5e7de0aa4f4-kube-api-access-gsqlt\") pod \"redhat-marketplace-8hmrj\" (UID: \"6e699750-e91e-4ec4-8700-b5e7de0aa4f4\") " pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:28 crc kubenswrapper[4647]: I1128 17:02:28.542155 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:29 crc kubenswrapper[4647]: I1128 17:02:29.059766 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hmrj"] Nov 28 17:02:29 crc kubenswrapper[4647]: I1128 17:02:29.659497 4647 generic.go:334] "Generic (PLEG): container finished" podID="6e699750-e91e-4ec4-8700-b5e7de0aa4f4" containerID="c7c5b8012c4796d084ed9f162a47e7de6f36e6771b3b41537178fda2a23facc8" exitCode=0 Nov 28 17:02:29 crc kubenswrapper[4647]: I1128 17:02:29.659581 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hmrj" event={"ID":"6e699750-e91e-4ec4-8700-b5e7de0aa4f4","Type":"ContainerDied","Data":"c7c5b8012c4796d084ed9f162a47e7de6f36e6771b3b41537178fda2a23facc8"} Nov 28 17:02:29 crc kubenswrapper[4647]: I1128 17:02:29.660053 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hmrj" event={"ID":"6e699750-e91e-4ec4-8700-b5e7de0aa4f4","Type":"ContainerStarted","Data":"202625e7c8def128f23f22fa98242543766195d141769f4cc9a67ca4089ff31e"} Nov 28 17:02:33 crc kubenswrapper[4647]: I1128 17:02:33.729927 4647 generic.go:334] "Generic (PLEG): container finished" podID="6e699750-e91e-4ec4-8700-b5e7de0aa4f4" containerID="cdef8973873738f880be3063837b480762e687d182580d949f48a72dcc90ff1c" exitCode=0 Nov 28 17:02:33 crc kubenswrapper[4647]: I1128 17:02:33.730370 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hmrj" event={"ID":"6e699750-e91e-4ec4-8700-b5e7de0aa4f4","Type":"ContainerDied","Data":"cdef8973873738f880be3063837b480762e687d182580d949f48a72dcc90ff1c"} Nov 28 17:02:34 crc kubenswrapper[4647]: I1128 17:02:34.742589 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8hmrj" event={"ID":"6e699750-e91e-4ec4-8700-b5e7de0aa4f4","Type":"ContainerStarted","Data":"120cf0b97ac5d3004b74820079218983d256b1251945da028709c70f5f4bc3c1"} Nov 28 17:02:34 crc kubenswrapper[4647]: I1128 17:02:34.769910 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8hmrj" podStartSLOduration=2.166973665 podStartE2EDuration="6.769886084s" podCreationTimestamp="2025-11-28 17:02:28 +0000 UTC" firstStartedPulling="2025-11-28 17:02:29.661744778 +0000 UTC m=+5879.509351209" lastFinishedPulling="2025-11-28 17:02:34.264657207 +0000 UTC m=+5884.112263628" observedRunningTime="2025-11-28 17:02:34.761981402 +0000 UTC m=+5884.609587823" watchObservedRunningTime="2025-11-28 17:02:34.769886084 +0000 UTC m=+5884.617492525" Nov 28 17:02:38 crc kubenswrapper[4647]: I1128 17:02:38.543336 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:38 crc kubenswrapper[4647]: I1128 17:02:38.543715 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:38 crc kubenswrapper[4647]: I1128 17:02:38.594731 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:42 crc kubenswrapper[4647]: I1128 17:02:42.397011 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:02:42 crc kubenswrapper[4647]: E1128 17:02:42.397907 4647 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:02:48 crc kubenswrapper[4647]: I1128 17:02:48.602730 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8hmrj" Nov 28 17:02:48 crc kubenswrapper[4647]: I1128 17:02:48.738068 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8hmrj"] Nov 28 17:02:48 crc kubenswrapper[4647]: I1128 17:02:48.773105 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"] Nov 28 17:02:48 crc kubenswrapper[4647]: I1128 17:02:48.773369 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rgvlr" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" containerID="cri-o://0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624" gracePeriod=2 Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.278916 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.408625 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content\") pod \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.408838 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlng2\" (UniqueName: \"kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2\") pod \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.408894 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities\") pod \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\" (UID: \"fbde6f92-55d8-482f-9dac-9b40fbae6c53\") " Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.412469 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities" (OuterVolumeSpecName: "utilities") pod "fbde6f92-55d8-482f-9dac-9b40fbae6c53" (UID: "fbde6f92-55d8-482f-9dac-9b40fbae6c53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.429538 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fbde6f92-55d8-482f-9dac-9b40fbae6c53" (UID: "fbde6f92-55d8-482f-9dac-9b40fbae6c53"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.434856 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2" (OuterVolumeSpecName: "kube-api-access-hlng2") pod "fbde6f92-55d8-482f-9dac-9b40fbae6c53" (UID: "fbde6f92-55d8-482f-9dac-9b40fbae6c53"). InnerVolumeSpecName "kube-api-access-hlng2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.512178 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlng2\" (UniqueName: \"kubernetes.io/projected/fbde6f92-55d8-482f-9dac-9b40fbae6c53-kube-api-access-hlng2\") on node \"crc\" DevicePath \"\"" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.512500 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.512510 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbde6f92-55d8-482f-9dac-9b40fbae6c53-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.891927 4647 generic.go:334] "Generic (PLEG): container finished" podID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerID="0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624" exitCode=0 Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.891965 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerDied","Data":"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624"} Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.891987 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgvlr" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.892008 4647 scope.go:117] "RemoveContainer" containerID="0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.891994 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgvlr" event={"ID":"fbde6f92-55d8-482f-9dac-9b40fbae6c53","Type":"ContainerDied","Data":"37e87fbd3809d60cce4708e235a676c995b71b44c0579188fcb5cc568d21e9ce"} Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.911555 4647 scope.go:117] "RemoveContainer" containerID="118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.926798 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"] Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.933486 4647 scope.go:117] "RemoveContainer" containerID="d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.936559 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgvlr"] Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.976573 4647 scope.go:117] "RemoveContainer" containerID="0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624" Nov 28 17:02:49 crc kubenswrapper[4647]: E1128 17:02:49.977030 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624\": container with ID starting with 0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624 not found: ID does not exist" containerID="0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.977141 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624"} err="failed to get container status \"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624\": rpc error: code = NotFound desc = could not find container \"0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624\": container with ID starting with 0701437faef83a064545f7fe387611dd9710f18c63030e2be19f5fa170ffa624 not found: ID does not exist" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.977228 4647 scope.go:117] "RemoveContainer" containerID="118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae" Nov 28 17:02:49 crc kubenswrapper[4647]: E1128 17:02:49.977712 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae\": container with ID starting with 118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae not found: ID does not exist" containerID="118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.977754 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae"} err="failed to get container status \"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae\": rpc error: code = NotFound desc = could not find 
container \"118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae\": container with ID starting with 118ace697a2ca0bd8f459bc13c668e44caa80fd05511b3aa2d81e54fed0bf2ae not found: ID does not exist" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.977781 4647 scope.go:117] "RemoveContainer" containerID="d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968" Nov 28 17:02:49 crc kubenswrapper[4647]: E1128 17:02:49.978053 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968\": container with ID starting with d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968 not found: ID does not exist" containerID="d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968" Nov 28 17:02:49 crc kubenswrapper[4647]: I1128 17:02:49.978078 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968"} err="failed to get container status \"d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968\": rpc error: code = NotFound desc = could not find container \"d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968\": container with ID starting with d650bde0fd29ab428afd5e4b50eb2cca213fca04003be073ec6f4d9d95f54968 not found: ID does not exist" Nov 28 17:02:50 crc kubenswrapper[4647]: I1128 17:02:50.406505 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" path="/var/lib/kubelet/pods/fbde6f92-55d8-482f-9dac-9b40fbae6c53/volumes" Nov 28 17:02:56 crc kubenswrapper[4647]: I1128 17:02:56.394930 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:02:56 crc kubenswrapper[4647]: E1128 17:02:56.395766 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:03:09 crc kubenswrapper[4647]: I1128 17:03:09.394774 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:03:09 crc kubenswrapper[4647]: E1128 17:03:09.396352 4647 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7mwt4_openshift-machine-config-operator(008f163b-b2fe-4238-90b5-96f0d89f3fb5)\"" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" Nov 28 17:03:23 crc kubenswrapper[4647]: I1128 17:03:23.394231 4647 scope.go:117] "RemoveContainer" containerID="5f39d2d28dad545af64640ef6525f3481a82b16e6adec64e6e1eb57c10bc5bea" Nov 28 17:03:24 crc kubenswrapper[4647]: I1128 17:03:24.295634 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" event={"ID":"008f163b-b2fe-4238-90b5-96f0d89f3fb5","Type":"ContainerStarted","Data":"9de4573786a8008281d6df13515dd54b1abcbde370adcd491a2cb3053545ac2d"} 
Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.844031 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:30 crc kubenswrapper[4647]: E1128 17:04:30.845100 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="extract-utilities" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.845115 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="extract-utilities" Nov 28 17:04:30 crc kubenswrapper[4647]: E1128 17:04:30.845137 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="extract-content" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.845144 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="extract-content" Nov 28 17:04:30 crc kubenswrapper[4647]: E1128 17:04:30.845169 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.845175 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.845349 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbde6f92-55d8-482f-9dac-9b40fbae6c53" containerName="registry-server" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.847897 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.866345 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.946715 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.947095 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t992c\" (UniqueName: \"kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:30 crc kubenswrapper[4647]: I1128 17:04:30.947238 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.049156 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " 
pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.049234 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.049340 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t992c\" (UniqueName: \"kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.049639 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.049703 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.073160 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t992c\" (UniqueName: \"kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c\") pod \"community-operators-fp6cx\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.195336 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:31 crc kubenswrapper[4647]: W1128 17:04:31.869947 4647 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod321c6a9d_141c_4e12_a7a6_cf0dcff672d5.slice/crio-7273b8bb8dc926495245a71ee36f44fc1429647d163a91ade64bdf0145146462 WatchSource:0}: Error finding container 7273b8bb8dc926495245a71ee36f44fc1429647d163a91ade64bdf0145146462: Status 404 returned error can't find the container with id 7273b8bb8dc926495245a71ee36f44fc1429647d163a91ade64bdf0145146462 Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.875616 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:31 crc kubenswrapper[4647]: I1128 17:04:31.935936 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerStarted","Data":"7273b8bb8dc926495245a71ee36f44fc1429647d163a91ade64bdf0145146462"} Nov 28 17:04:32 crc kubenswrapper[4647]: I1128 17:04:32.959716 4647 generic.go:334] "Generic (PLEG): container finished" podID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerID="9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f" exitCode=0 Nov 28 17:04:32 crc kubenswrapper[4647]: I1128 17:04:32.959945 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerDied","Data":"9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f"} Nov 28 17:04:32 crc kubenswrapper[4647]: I1128 17:04:32.962234 4647 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:04:37 crc kubenswrapper[4647]: I1128 17:04:37.007463 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerStarted","Data":"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f"} Nov 28 17:04:38 crc kubenswrapper[4647]: I1128 17:04:38.018283 4647 generic.go:334] "Generic (PLEG): container finished" podID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerID="131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f" exitCode=0 Nov 28 17:04:38 crc kubenswrapper[4647]: I1128 17:04:38.018324 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerDied","Data":"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f"} Nov 28 17:04:41 crc kubenswrapper[4647]: I1128 17:04:41.047673 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerStarted","Data":"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa"} Nov 28 17:04:42 crc kubenswrapper[4647]: I1128 17:04:42.084034 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fp6cx" podStartSLOduration=4.902773803 podStartE2EDuration="12.084015703s" podCreationTimestamp="2025-11-28 17:04:30 +0000 UTC" firstStartedPulling="2025-11-28 17:04:32.96197032 +0000 UTC m=+6002.809576751" lastFinishedPulling="2025-11-28 17:04:40.14321223 +0000 UTC m=+6009.990818651" 
observedRunningTime="2025-11-28 17:04:42.07644144 +0000 UTC m=+6011.924047861" watchObservedRunningTime="2025-11-28 17:04:42.084015703 +0000 UTC m=+6011.931622124" Nov 28 17:04:51 crc kubenswrapper[4647]: I1128 17:04:51.197848 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:51 crc kubenswrapper[4647]: I1128 17:04:51.198845 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:51 crc kubenswrapper[4647]: I1128 17:04:51.248270 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:52 crc kubenswrapper[4647]: I1128 17:04:52.213344 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:52 crc kubenswrapper[4647]: I1128 17:04:52.264258 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.187746 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fp6cx" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="registry-server" containerID="cri-o://d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa" gracePeriod=2 Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.701014 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.855988 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities\") pod \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.856093 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t992c\" (UniqueName: \"kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c\") pod \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.856275 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content\") pod \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\" (UID: \"321c6a9d-141c-4e12-a7a6-cf0dcff672d5\") " Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.856962 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities" (OuterVolumeSpecName: "utilities") pod "321c6a9d-141c-4e12-a7a6-cf0dcff672d5" (UID: "321c6a9d-141c-4e12-a7a6-cf0dcff672d5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.857303 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.867631 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c" (OuterVolumeSpecName: "kube-api-access-t992c") pod "321c6a9d-141c-4e12-a7a6-cf0dcff672d5" (UID: "321c6a9d-141c-4e12-a7a6-cf0dcff672d5"). InnerVolumeSpecName "kube-api-access-t992c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.908321 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "321c6a9d-141c-4e12-a7a6-cf0dcff672d5" (UID: "321c6a9d-141c-4e12-a7a6-cf0dcff672d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.958655 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t992c\" (UniqueName: \"kubernetes.io/projected/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-kube-api-access-t992c\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:54 crc kubenswrapper[4647]: I1128 17:04:54.958693 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/321c6a9d-141c-4e12-a7a6-cf0dcff672d5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.214173 4647 generic.go:334] "Generic (PLEG): container finished" podID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerID="d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa" exitCode=0 Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.214213 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerDied","Data":"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa"} Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.214240 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fp6cx" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.214263 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fp6cx" event={"ID":"321c6a9d-141c-4e12-a7a6-cf0dcff672d5","Type":"ContainerDied","Data":"7273b8bb8dc926495245a71ee36f44fc1429647d163a91ade64bdf0145146462"} Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.214284 4647 scope.go:117] "RemoveContainer" containerID="d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.248403 4647 scope.go:117] "RemoveContainer" containerID="131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.260207 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.272486 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fp6cx"] Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.278733 4647 scope.go:117] "RemoveContainer" containerID="9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.318715 4647 scope.go:117] "RemoveContainer" containerID="d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa" Nov 28 17:04:55 crc kubenswrapper[4647]: E1128 17:04:55.319294 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa\": container with ID starting with d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa not found: ID does not exist" containerID="d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.319383 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa"} err="failed to get container status \"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa\": rpc error: code = NotFound desc = could not find container \"d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa\": container with ID starting with d39cab9ea480b85600141ff4bc9ee96a67b4592228b1247b734751ed848a8cfa not found: ID does not exist" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.319493 4647 scope.go:117] "RemoveContainer" containerID="131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f" Nov 28 17:04:55 crc kubenswrapper[4647]: E1128 17:04:55.319776 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f\": container with ID starting with 131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f not found: ID does not exist" containerID="131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.319864 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f"} err="failed to get container status \"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f\": rpc error: code = NotFound desc = could not find 
container \"131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f\": container with ID starting with 131d4de878fbf33f313c11ad54206432973e481d12a146bce01ab08fa613004f not found: ID does not exist" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.319934 4647 scope.go:117] "RemoveContainer" containerID="9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f" Nov 28 17:04:55 crc kubenswrapper[4647]: E1128 17:04:55.320169 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f\": container with ID starting with 9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f not found: ID does not exist" containerID="9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f" Nov 28 17:04:55 crc kubenswrapper[4647]: I1128 17:04:55.320241 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f"} err="failed to get container status \"9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f\": rpc error: code = NotFound desc = could not find container \"9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f\": container with ID starting with 9e981677a2e89733361c1b671e61bdade5a7ce321e3ac40d5108c92481ba8a4f not found: ID does not exist" Nov 28 17:04:56 crc kubenswrapper[4647]: I1128 17:04:56.408829 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" path="/var/lib/kubelet/pods/321c6a9d-141c-4e12-a7a6-cf0dcff672d5/volumes" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.834932 4647 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:10 crc kubenswrapper[4647]: E1128 17:05:10.836858 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="registry-server" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.836940 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="registry-server" Nov 28 17:05:10 crc kubenswrapper[4647]: E1128 17:05:10.837004 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="extract-content" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.837063 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="extract-content" Nov 28 17:05:10 crc kubenswrapper[4647]: E1128 17:05:10.837142 4647 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="extract-utilities" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.837201 4647 state_mem.go:107] "Deleted CPUSet assignment" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="extract-utilities" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.837507 4647 memory_manager.go:354] "RemoveStaleState removing state" podUID="321c6a9d-141c-4e12-a7a6-cf0dcff672d5" containerName="registry-server" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.838914 4647 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.855261 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.995760 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm9hs\" (UniqueName: \"kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.995845 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:10 crc kubenswrapper[4647]: I1128 17:05:10.995885 4647 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.097494 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm9hs\" (UniqueName: \"kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.097587 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.097629 4647 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.098088 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.098100 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.122213 4647 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pm9hs\" (UniqueName: \"kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs\") pod \"redhat-operators-fm6nw\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.172156 4647 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:11 crc kubenswrapper[4647]: I1128 17:05:11.730374 4647 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:12 crc kubenswrapper[4647]: I1128 17:05:12.408170 4647 generic.go:334] "Generic (PLEG): container finished" podID="0acac121-efcf-4061-a64a-f3fcfb565224" containerID="4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904" exitCode=0 Nov 28 17:05:12 crc kubenswrapper[4647]: I1128 17:05:12.408256 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerDied","Data":"4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904"} Nov 28 17:05:12 crc kubenswrapper[4647]: I1128 17:05:12.408310 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerStarted","Data":"ba9d015cce7f152ad02a56c54b4b5bdec24c16f0ae288fa59ac767693d6b4339"} Nov 28 17:05:14 crc kubenswrapper[4647]: I1128 17:05:14.431610 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerStarted","Data":"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f"} Nov 28 17:05:18 crc kubenswrapper[4647]: I1128 17:05:18.466675 4647 generic.go:334] "Generic (PLEG): container finished" podID="0acac121-efcf-4061-a64a-f3fcfb565224" containerID="6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f" exitCode=0 Nov 28 17:05:18 crc kubenswrapper[4647]: I1128 17:05:18.466804 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerDied","Data":"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f"} Nov 28 17:05:19 crc kubenswrapper[4647]: I1128 17:05:19.498102 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerStarted","Data":"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0"} Nov 28 17:05:19 crc kubenswrapper[4647]: I1128 17:05:19.546617 4647 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fm6nw" podStartSLOduration=3.042620486 podStartE2EDuration="9.546562863s" podCreationTimestamp="2025-11-28 17:05:10 +0000 UTC" firstStartedPulling="2025-11-28 17:05:12.409909646 +0000 UTC m=+6042.257516067" lastFinishedPulling="2025-11-28 17:05:18.913852023 +0000 UTC m=+6048.761458444" observedRunningTime="2025-11-28 17:05:19.535001484 +0000 UTC m=+6049.382607915" watchObservedRunningTime="2025-11-28 17:05:19.546562863 +0000 UTC m=+6049.394169314" Nov 28 17:05:21 crc kubenswrapper[4647]: I1128 17:05:21.173236 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 
17:05:21 crc kubenswrapper[4647]: I1128 17:05:21.173567 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:22 crc kubenswrapper[4647]: I1128 17:05:22.233339 4647 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fm6nw" podUID="0acac121-efcf-4061-a64a-f3fcfb565224" containerName="registry-server" probeResult="failure" output=< Nov 28 17:05:22 crc kubenswrapper[4647]: timeout: failed to connect service ":50051" within 1s Nov 28 17:05:22 crc kubenswrapper[4647]: > Nov 28 17:05:31 crc kubenswrapper[4647]: I1128 17:05:31.218359 4647 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:31 crc kubenswrapper[4647]: I1128 17:05:31.276866 4647 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:31 crc kubenswrapper[4647]: I1128 17:05:31.461516 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:32 crc kubenswrapper[4647]: I1128 17:05:32.620641 4647 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fm6nw" podUID="0acac121-efcf-4061-a64a-f3fcfb565224" containerName="registry-server" containerID="cri-o://33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0" gracePeriod=2 Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.112522 4647 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.159133 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities\") pod \"0acac121-efcf-4061-a64a-f3fcfb565224\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.159187 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content\") pod \"0acac121-efcf-4061-a64a-f3fcfb565224\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.159401 4647 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm9hs\" (UniqueName: \"kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs\") pod \"0acac121-efcf-4061-a64a-f3fcfb565224\" (UID: \"0acac121-efcf-4061-a64a-f3fcfb565224\") " Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.160148 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities" (OuterVolumeSpecName: "utilities") pod "0acac121-efcf-4061-a64a-f3fcfb565224" (UID: "0acac121-efcf-4061-a64a-f3fcfb565224"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.175633 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs" (OuterVolumeSpecName: "kube-api-access-pm9hs") pod "0acac121-efcf-4061-a64a-f3fcfb565224" (UID: "0acac121-efcf-4061-a64a-f3fcfb565224"). InnerVolumeSpecName "kube-api-access-pm9hs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.262159 4647 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.262197 4647 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm9hs\" (UniqueName: \"kubernetes.io/projected/0acac121-efcf-4061-a64a-f3fcfb565224-kube-api-access-pm9hs\") on node \"crc\" DevicePath \"\"" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.289686 4647 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0acac121-efcf-4061-a64a-f3fcfb565224" (UID: "0acac121-efcf-4061-a64a-f3fcfb565224"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.367082 4647 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acac121-efcf-4061-a64a-f3fcfb565224-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.633353 4647 generic.go:334] "Generic (PLEG): container finished" podID="0acac121-efcf-4061-a64a-f3fcfb565224" containerID="33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0" exitCode=0 Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.633462 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerDied","Data":"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0"} Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.633722 4647 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fm6nw" event={"ID":"0acac121-efcf-4061-a64a-f3fcfb565224","Type":"ContainerDied","Data":"ba9d015cce7f152ad02a56c54b4b5bdec24c16f0ae288fa59ac767693d6b4339"} Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.633746 4647 scope.go:117] "RemoveContainer" containerID="33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.633610 4647 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fm6nw" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.682216 4647 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.682979 4647 scope.go:117] "RemoveContainer" containerID="6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.693749 4647 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fm6nw"] Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.724083 4647 scope.go:117] "RemoveContainer" containerID="4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.750137 4647 scope.go:117] "RemoveContainer" containerID="33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0" Nov 28 17:05:33 crc kubenswrapper[4647]: E1128 17:05:33.750717 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0\": container with ID starting with 33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0 not found: ID does not exist" containerID="33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.750747 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0"} err="failed to get container status \"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0\": rpc error: code = NotFound desc = could not find container \"33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0\": container with ID starting with 33432bd9c88ee123e7c26a71774386037058dc31ab30c9a3d20ac2c63d4cf7e0 not found: ID does not exist" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.750770 4647 scope.go:117] "RemoveContainer" containerID="6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f" Nov 28 17:05:33 crc kubenswrapper[4647]: E1128 17:05:33.751278 4647 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f\": container with ID starting with 6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f not found: ID does not exist" containerID="6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.751324 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f"} err="failed to get container status \"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f\": rpc error: code = NotFound desc = could not find container \"6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f\": container with ID starting with 6cde4f8f1ea8332397ea81ba155437199b7f25492488cc6f9922ee5b3545778f not found: ID does not exist" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.751337 4647 scope.go:117] "RemoveContainer" containerID="4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904" Nov 28 17:05:33 crc kubenswrapper[4647]: E1128 17:05:33.751667 4647 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904\": container with ID starting with 4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904 not found: ID does not exist" containerID="4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904" Nov 28 17:05:33 crc kubenswrapper[4647]: I1128 17:05:33.751712 4647 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904"} err="failed to get container status \"4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904\": rpc error: code = NotFound desc = could not find container \"4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904\": container with ID starting with 4300173200dae528fb0cccd8079f4adfd030983cfdf5208a117cb3c558ca3904 not found: ID does not exist" Nov 28 17:05:34 crc kubenswrapper[4647]: I1128 17:05:34.405708 4647 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0acac121-efcf-4061-a64a-f3fcfb565224" path="/var/lib/kubelet/pods/0acac121-efcf-4061-a64a-f3fcfb565224/volumes" Nov 28 17:05:47 crc kubenswrapper[4647]: I1128 17:05:47.023204 4647 patch_prober.go:28] interesting pod/machine-config-daemon-7mwt4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:05:47 crc kubenswrapper[4647]: I1128 17:05:47.023750 4647 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7mwt4" podUID="008f163b-b2fe-4238-90b5-96f0d89f3fb5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112353162024444 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112353163017362 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112336654016513 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112336654015463 5ustar corecore